TODO else if response in metrics

migrate
prplV 2025-06-03 18:20:35 +03:00
parent 8ba911385f
commit 888fb41885
16 changed files with 1683 additions and 1223 deletions

View File

@ -184,6 +184,10 @@ pub mod metrics_models {
)]
Full,
// system
#[command(
about = "To capture general host info",
)]
Host,
#[command(
about = "To capture detailed CPU metrics",
)]

View File

@ -2,27 +2,27 @@ mod options;
mod utils;
use log::{error, info};
use options::cli_pipeline::init_cli_pipeline;
use options::config::v2::init_config_mechanism;
use options::logger::setup_logger;
use options::preboot::PrebootParams;
use options::signals::set_valid_destructor;
use options::structs::ProcessUnit;
use options::structs::{Processes, bus::BusMessage};
use options::cli_pipeline::init_cli_pipeline;
use options::structs::{bus::BusMessage, Processes};
use std::sync::Arc;
use std::time::Duration;
use options::preboot::PrebootParams;
use tokio::sync::{broadcast, oneshot, mpsc};
use options::config::v2::init_config_mechanism;
use utils::v2::init_monitoring;
use tokio::sync::{broadcast, mpsc, oneshot};
use utils::bus::Bus;
use utils::metrics::init_metrics_grubber;
use utils::v2::init_monitoring;
#[tokio::main(flavor = "multi_thread", worker_threads = 4)]
async fn main() -> anyhow::Result<()>{
async fn main() -> anyhow::Result<()> {
let preboot = Arc::new(PrebootParams::validate());
let _ = setup_logger();
info!("Noxis is configurating...");
//
//
let (tx_brd, mut rx_brd) = broadcast::channel::<Processes>(1);
// for cli to get config
let mut rx_cli_brd = tx_brd.subscribe();
@ -30,7 +30,7 @@ async fn main() -> anyhow::Result<()>{
let (tx_oneshot, rx_oneshot) = oneshot::channel::<Processes>();
let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![];
// to BUS channel
// to BUS channel
let (tx_to_bus, rx_to_bus) = mpsc::channel::<BusMessage>(5);
// from BUS channels
let (tx_to_cli, rx_to_cli) = mpsc::channel::<BusMessage>(5);
@ -43,24 +43,25 @@ async fn main() -> anyhow::Result<()>{
let tx_to_metrics = Arc::new(tx_to_metrics);
let bus_module = tokio::spawn(async move {
let mut bus = Bus::new(rx_to_bus, tx_to_cli.clone(), tx_to_supervisor.clone(), tx_to_metrics.clone());
let mut bus = Bus::new(
rx_to_bus,
tx_to_cli.clone(),
tx_to_supervisor.clone(),
tx_to_metrics.clone(),
);
bus.process().await;
error!("Info Bus crushed !");
});
handler.push(bus_module);
// initilaizing task for config manipulations
let preboot_config = preboot.clone();
let preboot_config = preboot.clone();
let config_module = tokio::spawn(async move {
let _ = init_config_mechanism(
rx_oneshot,
tx_brd,
preboot_config
).await;
let _ = init_config_mechanism(rx_oneshot, tx_brd, preboot_config).await;
});
handler.push(config_module);
// initilaizing task for cli manipulation
// initilaizing task for cli manipulation
let tx_bus = tx_to_bus.clone();
let preboot_cli = preboot.clone();
let cli_module = tokio::spawn(async move {
@ -71,28 +72,27 @@ async fn main() -> anyhow::Result<()>{
break match rx_cli_brd.try_recv() {
Ok(conf) => conf,
Err(_) => continue,
}
};
}
};
if let Err(er) = init_cli_pipeline(
preboot_cli,
preboot_cli,
Arc::new(config),
tx_oneshot,
rx_to_cli,
tx_bus.clone()
).await {
tx_bus.clone(),
)
.await
{
error!("CLI pipeline failed due to {}", er)
}
});
});
handler.push(cli_module);
// metrics
let tx_bus = tx_to_bus.clone();
let metrics_module = tokio::spawn(async move {
if let Err(er) = init_metrics_grubber(
tx_bus.clone(),
rx_to_metrics
).await {
let metrics_module = tokio::spawn(async move {
if let Err(er) = init_metrics_grubber(tx_bus.clone(), rx_to_metrics).await {
error!("Metrics module crushed : {}", er);
}
});
@ -116,14 +116,10 @@ async fn main() -> anyhow::Result<()>{
break match rx_brd.try_recv() {
Ok(conf) => conf,
Err(_) => continue,
}
};
}
};
if let Err(er) = init_monitoring(
config,
rx_to_supervisor,
tx_bus
).await {
if let Err(er) = init_monitoring(config, rx_to_supervisor, tx_bus).await {
error!("Monitoring mod failed due to {}", er);
}
});
@ -133,4 +129,4 @@ async fn main() -> anyhow::Result<()>{
let _ = i.await;
}
Ok(())
}
}

View File

@ -1,8 +1,8 @@
// ! gathering optional items module
pub mod cli_pipeline;
pub mod config;
pub mod logger;
pub mod preboot;
pub mod signals;
pub mod structs;
pub mod preboot;
pub mod cli_pipeline;

View File

@ -1,15 +1,15 @@
use super::structs::bus::BusMessage;
use super::structs::Processes;
use crate::options::structs::bus::InternalCli;
use log::{error, info};
use tokio::net::{ UnixStream, UnixListener };
use tokio::sync::{Mutex, OnceCell};
use tokio::time::{sleep, Duration};
use noxis_cli::{Cli, ProcessAction};
use std::any::Any;
use std::fs;
use std::sync::Arc;
use tokio::io::{ AsyncWriteExt, AsyncReadExt};
use noxis_cli::{Cli, ProcessAction};
use crate::options::structs::bus::InternalCli;
use super::structs::Processes;
use super::structs::bus::BusMessage;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{UnixListener, UnixStream};
use tokio::sync::{Mutex, OnceCell};
use tokio::time::{sleep, Duration};
use super::preboot::PrebootParams;
@ -21,32 +21,28 @@ type ReadyBusReciever = Arc<Mutex<tokio::sync::mpsc::Receiver<BusMessage>>>;
/// # Fn `init_cli_pipeline`
/// ## for catching all input requests from CLI
///
///
/// *input* : -
///
/// *output* : `anyhow::Result<()>` to wrap errors
/// *output* : `anyhow::Result<()>` to wrap errors
///
/// *initiator* : fn `main`
///
/// *managing* : `TcpListener` object to handle requests
/// *managing* : `TcpListener` object to handle requests
///
/// *depends on* : -
///
///
pub async fn init_cli_pipeline(
params: Arc<PrebootParams>,
config : Arc<Processes>,
config_gateway : ConfigGateway,
bus_reciever : BusReciever,
bus_sender : BusSender,
params: Arc<PrebootParams>,
config: Arc<Processes>,
config_gateway: ConfigGateway,
bus_reciever: BusReciever,
bus_sender: BusSender,
) -> anyhow::Result<()> {
let socket_path = &params.self_socket;
let _ = fs::remove_file(socket_path);
let config_gateway = Arc::new(
Mutex::new(
OnceCell::new_with(Some(config_gateway))
)
);
let config_gateway = Arc::new(Mutex::new(OnceCell::new_with(Some(config_gateway))));
let bus_reciever = Arc::new(Mutex::new(bus_reciever));
match UnixListener::bind(socket_path) {
@ -57,72 +53,87 @@ pub async fn init_cli_pipeline(
loop {
match list.accept().await {
Ok((socket, _)) => {
// ??? maybe errors on async work with data transfering between modules
// ??? maybe errors on async work with data transfering between modules
let params = params.clone();
let config = config.clone();
let config_gateway = config_gateway.clone();
let bus_reciever = bus_reciever.clone();
let bus_sender = bus_sender.clone();
tokio::spawn(async move {
process_connection(socket, params.clone(), config.clone(), config_gateway.clone(), bus_reciever, bus_sender).await;
process_connection(
socket,
params.clone(),
config.clone(),
config_gateway.clone(),
bus_reciever,
bus_sender,
)
.await;
});
},
}
Err(er) => {
error!("Cannot poll connection to CLI due to {}", er);
sleep(Duration::from_millis(300)).await;
},
}
}
}
// Ok(())
},
}
Err(er) => {
error!("Failed to open UnixListener for CLI");
Err(er.into())
},
}
}
}
/// # Fn `process_connection`
/// ## for processing input CLI requests
///
///
/// *input* : mut stream: `TcpStream`
///
/// *output* : -
/// *output* : -
///
/// *initiator* : fn `init_cli_pipeline`
///
/// *managing* : mutable object of `TcpStream`
///
/// *depends on* : `tokio::net::TcpStream`
///
///
async fn process_connection(
mut stream: UnixStream,
params: Arc<PrebootParams>,
config : Arc<Processes>,
cfg_gateway : ProcessedConfigGateway,
bus_reciever : ReadyBusReciever,
bus_sender : BusSender,
) {
mut stream: UnixStream,
params: Arc<PrebootParams>,
config: Arc<Processes>,
cfg_gateway: ProcessedConfigGateway,
bus_reciever: ReadyBusReciever,
bus_sender: BusSender,
) {
let mut buf = vec![0; 1024];
match stream.read(&mut buf).await {
Ok(0) => {
info!("Client disconnected ");
},
}
Ok(n) => {
buf.truncate(n);
info!("CLI have sent {} bytes", n);
match serde_json::from_slice::<Cli>(&buf) {
Ok(cli) => {
info!("Received CLI request: {:?}", cli);
let response = match process_cli_cmd(cli, params.clone(), config, cfg_gateway.clone(), bus_reciever.clone(), bus_sender.clone()).await {
Ok(response) => {
response
},
let response = match process_cli_cmd(
cli,
params.clone(),
config,
cfg_gateway.clone(),
bus_reciever.clone(),
bus_sender.clone(),
)
.await
{
Ok(response) => response,
Err(er) => {
let error_msg = format!("Error: {}", er);
error!("{}", &error_msg);
error_msg
},
}
};
for line in response.lines() {
if let Err(er) = stream.write_all(line.as_bytes()).await {
@ -134,36 +145,35 @@ async fn process_connection(
error!("Failed to parse CLI request: {}", e);
}
}
},
}
Err(e) => error!("Failed to read from socket: {}", e),
}
let _ = stream.shutdown().await;
}
async fn process_cli_cmd(
cli : Cli,
params: Arc<PrebootParams>,
global_config : Arc<Processes>,
cfg_gateway: ProcessedConfigGateway,
bus_reciever : ReadyBusReciever,
bus_sender : BusSender,
cli: Cli,
params: Arc<PrebootParams>,
global_config: Arc<Processes>,
cfg_gateway: ProcessedConfigGateway,
bus_reciever: ReadyBusReciever,
bus_sender: BusSender,
) -> anyhow::Result<String> {
use noxis_cli::{Commands, ConfigAction};
return match cli.command {
Commands::Config(config) => {
match config.action {
ConfigAction::Show(env ) => {
ConfigAction::Show(env) => {
if env.is_env {
Ok(serde_json::to_string_pretty(params.as_ref())?)
} else {
/* */
Ok(serde_json::to_string_pretty(global_config.as_ref())?)
}
},
ConfigAction::Reset => {
Err(anyhow::Error::msg("It's temporarly forbidden to reset current config using CLI-util"))
},
}
ConfigAction::Reset => Err(anyhow::Error::msg(
"It's temporarly forbidden to reset current config using CLI-util",
)),
ConfigAction::Local(cfg) => {
if cfg.is_json {
/* */
@ -171,7 +181,7 @@ async fn process_cli_cmd(
let new_version = new_config.get_version().to_string();
use super::{config::config_comparing, structs::ConfigActuality};
return match config_comparing(&global_config, &new_config) {
ConfigActuality::Remote => {
let cfg_gateway = cfg_gateway.clone();
@ -180,71 +190,102 @@ async fn process_cli_cmd(
match lock.take() {
Some(channel) => {
let _ = channel.send(new_config);
},
None => error!("Cannot update confif due to channel sender loss"),
}
None => error!(
"Cannot update confif due to channel sender loss"
),
}
});
Ok(format!("Ok. Saving and reloading with version {}", new_version))
},
_ => Err(anyhow::Error::msg(format!("Local config (version: {}) is more actual", global_config.get_version()))),
}
Ok(format!(
"Ok. Saving and reloading with version {}",
new_version
))
}
_ => Err(anyhow::Error::msg(format!(
"Local config (version: {}) is more actual",
global_config.get_version()
))),
};
} else {
Err(anyhow::Error::msg("It's temporarly forbidden to set config in non-json mode"))
}
},
ConfigAction::Remote => {Ok(params.remote_server_url.clone())},
Err(anyhow::Error::msg(
"It's temporarly forbidden to set config in non-json mode",
))
}
}
ConfigAction::Remote => Ok(params.remote_server_url.clone()),
/* */
// _ => Err(anyhow::Error::msg("Unrecognized command from CLI"))
}
},
}
Commands::Processes => {
use crate::options::structs::bus::{BusMessageContentType, BusMessageDirection};
use crate::utils::metrics::processes::ProcessesQuery;
let _ = bus_sender
.send(BusMessage::Request(
BusMessageDirection::ToSupervisor,
BusMessageContentType::ProcessQuery,
Box::new(ProcessesQuery::QueryAll),
))
.await;
let mut bus = bus_reciever.lock().await;
let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move {
loop {
if let Ok(cont) = bus.try_recv() {
return cont;
}
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
})
.await?;
if let BusMessage::Response(_, _, content) = resp {
let content: Box<dyn Any> = content;
if let Ok(resp) = content.downcast::<anyhow::Result<String>>() {
return Ok((*resp)?);
}
}
Err(anyhow::Error::msg(format!(
"Unknown type of response from the Supervisor"
)))
}
Commands::Process(prc) => {
use crate::options::structs::bus::{BusMessageDirection, BusMessageContentType, CLiCommand};
use crate::options::structs::bus::{
BusMessageContentType, BusMessageDirection, CLiCommand,
};
let proc_name = prc.process;
let req = BusMessage::Request(
BusMessageDirection::ToSupervisor,
BusMessageContentType::Cli,
Box::new(
match prc.action {
ProcessAction::Start => {
InternalCli {
prc : proc_name,
cmd : CLiCommand::Start,
}
BusMessageContentType::Cli,
Box::new(match prc.action {
ProcessAction::Start => InternalCli {
prc: proc_name,
cmd: CLiCommand::Start,
},
ProcessAction::Stop => {
InternalCli {
prc : proc_name,
cmd : CLiCommand::Stop,
}
ProcessAction::Stop => InternalCli {
prc: proc_name,
cmd: CLiCommand::Stop,
},
ProcessAction::Restart => {
InternalCli {
prc : proc_name,
cmd : CLiCommand::Restart,
}
ProcessAction::Restart => InternalCli {
prc: proc_name,
cmd: CLiCommand::Restart,
},
ProcessAction::Freeze => {
InternalCli {
prc : proc_name,
cmd : CLiCommand::Freeze,
}
ProcessAction::Freeze => InternalCli {
prc: proc_name,
cmd: CLiCommand::Freeze,
},
ProcessAction::Unfreeze => {
InternalCli {
prc : proc_name,
cmd : CLiCommand::Unfreeze,
}
ProcessAction::Unfreeze => InternalCli {
prc: proc_name,
cmd: CLiCommand::Unfreeze,
},
/* TODO: ALL CMDS */
_ => {
InternalCli {
prc : proc_name,
cmd : CLiCommand::Restart,
}
_ => InternalCli {
prc: proc_name,
cmd: CLiCommand::Restart,
},
}
)
}),
);
let mut bus = bus_reciever.lock().await;
bus_sender.send(req).await?;
@ -252,52 +293,58 @@ async fn process_cli_cmd(
let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move {
loop {
if let Ok(cont) = bus.try_recv() {
return cont
return cont;
}
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
}).await?;
})
.await?;
if let BusMessage::Response(_, _, content) = resp {
let content: Box<dyn Any> = content;
if let Ok(resp) = content.downcast::<anyhow::Result<String>>() {
return Ok((*resp)?)
return Ok((*resp)?);
}
}
Err(anyhow::Error::msg(format!("Unknown type of response from supervisor")))
},
/* */
Err(anyhow::Error::msg(format!(
"Unknown type of response from the Supervisor"
)))
}
Commands::Status => Ok(String::from("Ok")),
Commands::Inspect(mode) => {
use crate::options::structs::bus::{BusMessageDirection, BusMessageContentType};
use crate::options::structs::bus::{BusMessageContentType, BusMessageDirection};
let mode = mode.mode;
if let Ok(()) = bus_sender.send(BusMessage::Request(
BusMessageDirection::ToMetrics,
BusMessageContentType::MetricsModeTransfered,
Box::new(mode)
)).await {
if let Ok(()) = bus_sender
.send(BusMessage::Request(
BusMessageDirection::ToMetrics,
BusMessageContentType::MetricsModeTransfered,
Box::new(mode),
))
.await
{
let mut bus_reciever = bus_reciever.lock().await;
sleep(Duration::from_millis(300)).await;
let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move {
loop {
if let Ok(cont) = bus_reciever.try_recv() {
return cont
return cont;
}
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
}).await?;
})
.await?;
if let BusMessage::Response(_, _, content) = resp {
let content: Box<dyn Any> = content;
if let Ok(resp) = content.downcast::<anyhow::Result<String>>() {
// let mut file = std::fs::File::create("output.json")?;
// file.write_all(resp.unwrap_or_else(|_| String::from("no")).)?;
return Ok((*resp)?)
return Ok((*resp)?);
}
}
return Err(anyhow::Error::msg(format!("Unknown type of response from CLI")));
return Err(anyhow::Error::msg(format!(
"Unknown type of response from CLI"
)));
}
Ok(String::from("Ok"))
},
_ => Ok(String::from("Ok"))
}
}
_ => Ok(String::from("Ok")),
};
}

View File

@ -1,83 +1,68 @@
use super::preboot::PrebootParams;
use super::structs::*;
use crate::utils::files::create_watcher;
use inotify::EventMask;
use log::{error, info, warn};
use redis::{Client, Connection};
use std::fs::File;
use std::fs::OpenOptions;
use std::io::Write;
use std::os::unix::process::CommandExt;
use std::process::Command;
use std::sync::Arc;
use std::{env, fs};
use super::preboot::PrebootParams;
use tokio::time::{Duration, sleep};
use tokio::sync::{
use tokio::sync::{
broadcast::Receiver as BroadcastReceiver,
broadcast::Sender as BroadcastSender,
oneshot,
oneshot::{ Receiver as OneShotReciever, Sender as OneShotSender },
broadcast::Sender as BroadcastSender, broadcast::Receiver as BroadcastReceiver };
use crate::utils::files::create_watcher;
use std::fs::File;
use inotify::EventMask;
oneshot::{Receiver as OneShotReciever, Sender as OneShotSender},
};
use tokio::time::{sleep, Duration};
// const CONFIG_PATH: &str = "settings.json";
pub mod v2 {
use std::path::PathBuf;
use crate::utils::get_container_id;
use std::path::PathBuf;
use super::*;
pub async fn init_config_mechanism(
// to handle cli config changes
// to handle cli config changes
cli_oneshot: OneShotReciever<Processes>,
// to share local config with PRCS, CLI_PIPELINE and CONFIG modules
brd_tx : BroadcastSender<Processes>,
// preboot params (args)
params : Arc<PrebootParams>
/*...*/
) {
// to share local config with PRCS, CLI_PIPELINE and CONFIG modules
brd_tx: BroadcastSender<Processes>,
// preboot params (args)
params: Arc<PrebootParams>, /*...*/
) {
// channel for pubsub to handle local config pulling
let local_config_brd_reciever = brd_tx.subscribe();
// channel between pub-sub mech and local config mech
// channel between pub-sub mech and local config mech
let (tx_pb_lc, rx_pb_lc) = oneshot::channel::<bool>();
// channel between cli mech and local config mech
// channel between cli mech and local config mech
let (tx_cli_lc, rx_cli_lc) = oneshot::channel::<bool>();
// dbg!("before lc");
let params_clone = params.clone();
let for_lc_path = params.clone();
let lc_path = for_lc_path
.config
.to_str()
.unwrap_or("settings.json");
let lc_path = for_lc_path.config.to_str().unwrap_or("settings.json");
// future to init work with local config
let lc_future = tokio::spawn(
// let params = params.clone();
local_config_reciever(
params_clone,
rx_pb_lc,
rx_cli_lc,
Arc::new(brd_tx)
)
local_config_reciever(params_clone, rx_pb_lc, rx_cli_lc, Arc::new(brd_tx)),
);
// dbg!("before pb");
// future to init work with pub sub mechanism
let pubsub_future = tokio::spawn(
pubsub_config_reciever(
tx_pb_lc,
params.clone(),
local_config_brd_reciever
)
);
let pubsub_future = tokio::spawn(pubsub_config_reciever(
tx_pb_lc,
params.clone(),
local_config_brd_reciever,
));
// dbg!("before cli");
// future to catch new configs from cli pipeline
let cli_future = tokio::spawn(
from_cli_config_reciever(
cli_oneshot,
tx_cli_lc
)
);
let cli_future = tokio::spawn(from_cli_config_reciever(cli_oneshot, tx_cli_lc));
// let _ = lc_future.await;
// dbg!("before select");
tokio::select! {
@ -106,7 +91,7 @@ pub mod v2 {
Ok(res) => {
if res.is_ok() {
info!("New config was saved locally, restarting ...");
}
}
else {
error!("Pubsub mechanism crushed, restarting ...");
}
@ -135,12 +120,15 @@ pub mod v2 {
}
// dbg!("after select");
// TODO! futures + select! [OK]
// TODO! tests config
// TODO! tests config
}
pub async fn get_redis_connection(params: &str) -> Option<Connection> {
for i in 1..=3 {
let redis_url = format!("redis://{}/", params);
info!("Trying to connect Redis pubsub `{}`. Attempt {}", &redis_url, i);
info!(
"Trying to connect Redis pubsub `{}`. Attempt {}",
&redis_url, i
);
if let Ok(client) = Client::open(redis_url) {
if let Ok(conn) = client.get_connection() {
info!("Successfully opened Redis connection");
@ -153,13 +141,13 @@ pub mod v2 {
None
}
// loop checking redis pubsub
// loop checking redis pubsub
async fn pubsub_config_reciever(
// to stop checking local config
local_conf_tx : OneShotSender<bool>,
params : Arc<PrebootParams>,
tx_brd_local : BroadcastReceiver<Processes>,
) -> anyhow::Result<()>{
local_conf_tx: OneShotSender<bool>,
params: Arc<PrebootParams>,
tx_brd_local: BroadcastReceiver<Processes>,
) -> anyhow::Result<()> {
/*...*/
// dbg!("start of pb");
let mut tx_brd_local = tx_brd_local;
@ -180,19 +168,24 @@ pub mod v2 {
Some(mut conn) => {
let mut pub_sub = conn.as_pubsub();
let channel_name = get_container_id().unwrap_or(String::from("default"));
let channel_name = channel_name.trim();
match pub_sub.subscribe(channel_name) {
match pub_sub.subscribe(&channel_name) {
Err(er) => {
error!("Cannot subscribe pubsub channel due to {}", &er);
return Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er)))
},
return Err(anyhow::Error::msg(format!(
"Cannot subscribe pubsub channel due to {}",
er
)));
}
Ok(_) => {
info!("Successfully subscribed to {} pubsub channel", channel_name);
info!(
"Successfully subscribed to {} pubsub channel",
&channel_name
);
let _ = pub_sub.set_read_timeout(Some(Duration::from_secs(1)));
loop {
if let Ok(msg) = pub_sub.get_message() {
// dbg!("ok on get message");
let payload : Result<String, _> = msg.get_payload();
let payload: Result<String, _> = msg.get_payload();
match payload {
Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "),
Ok(payload) => {
@ -223,38 +216,35 @@ pub mod v2 {
},
}
}
// delay
// delay
tokio::task::yield_now().await;
}
},
}
}
},
}
None => {
sleep(Duration::from_secs(20)).await;
}
}
Ok(())
}
//
//
async fn local_config_reciever(
params : Arc<PrebootParams>,
pubsub_oneshot : OneShotReciever<bool>,
cli_oneshot : OneShotReciever<bool>,
brd_tx : Arc<BroadcastSender<Processes>>,
params: Arc<PrebootParams>,
pubsub_oneshot: OneShotReciever<bool>,
cli_oneshot: OneShotReciever<bool>,
brd_tx: Arc<BroadcastSender<Processes>>,
/*...*/
) -> anyhow::Result<()> {
) -> anyhow::Result<()> {
/*...*/
// shadowing as mut
let mut pubsub_oneshot = pubsub_oneshot;
// shadowing as mut
let mut pubsub_oneshot = pubsub_oneshot;
let mut cli_oneshot = cli_oneshot;
// fill with default empty config, mut to change later
let mut _current_config = Processes::default();
// PathBuf to &str to work with local config path as slice
let local_config_path = params
.config
.to_str()
.unwrap_or("settings.json");
let local_config_path = params.config.to_str().unwrap_or("settings.json");
match load_processes(local_config_path) {
// if local exists
@ -264,30 +254,30 @@ pub mod v2 {
if let Err(er) = brd_tx.send(_current_config.clone()) {
error!("Cannot share local config with broadcast due to {}", er);
}
},
}
// if local is not exist
None => {
warn!("Local config wasn't found. Waiting for new ...");
return Err(anyhow::Error::msg("No local config"));
// ...
},
}
}
// 100% local exists here
// 100% local exists here
// create watcher on local config file
match create_watcher("", local_config_path) {
Ok(mut watcher) => {
loop {
let mut need_to_export_config = false;
let mut need_to_export_config = false;
// let mut need_to_recreate_watcher = false;
// return situations here
// 1) oneshot signal
// return situations here
// 1) oneshot signal
// 2) if config was deleted -> recreate and fill with current config that is held here
// 3) if config was changed -> fill with current config that is held here
// catching signal from pubsub
// it's because pubsub mech pulled new valid and actual config and now it's time to ...
// ... overwrite local config file and restart main thread
// ... overwrite local config file and restart main thread
if let Ok(_) = pubsub_oneshot.try_recv() {
sleep(Duration::from_secs(1)).await;
return Ok(());
@ -295,7 +285,7 @@ pub mod v2 {
// catching signal from cli
// it's because cli mech pulled new valid and actual config and now it's time to ...
// ... overwrite local config file and restart main thread (like in previous mechanism)
// ... overwrite local config file and restart main thread (like in previous mechanism)
if let Ok(_) = cli_oneshot.try_recv() {
sleep(Duration::from_secs(1)).await;
return Ok(());
@ -312,7 +302,7 @@ pub mod v2 {
} else {
// changes check
let mut buffer = [0; 128];
let events = watcher.read_events(&mut buffer);
let events = watcher.read_events(&mut buffer);
if events.is_ok() {
let events: Vec<EventMask> = events
.unwrap()
@ -329,7 +319,9 @@ pub mod v2 {
}
// exporting data
if need_to_export_config {
if let Err(er) = export_saved_config_data_locally(&params.config, &_current_config).await {
if let Err(er) =
export_saved_config_data_locally(&params.config, &_current_config).await
{
error!("Cannot save actual imported config due to {}", er);
} else {
// recreation watcher (draining activity buffer mechanism)
@ -344,66 +336,65 @@ pub mod v2 {
sleep(Duration::from_millis(300)).await;
// tokio::task::yield_now().await;
}
},
}
Err(_) => {
error!("Cannot create watcher on local config file `{}`. Deinitializing warding local config mechanism...", local_config_path);
return Err(anyhow::Error::msg("Cannot create watcher on local config file"));
},
return Err(anyhow::Error::msg(
"Cannot create watcher on local config file",
));
}
}
}
// [:IN-TEST]
async fn from_cli_config_reciever(
cli_oneshot: OneShotReciever<Processes>,
to_local_tx: OneShotSender<bool>
) -> Option<Processes> {
to_local_tx: OneShotSender<bool>,
) -> Option<Processes> {
/* match awaits til channel*/
// dbg!("start of cli");
loop {
if !cli_oneshot.is_empty() {
match cli_oneshot.await {
Ok(config_from_cli) => {
info!("New actual config `{}` from CLI was pulled. Saving and restaring ...", &config_from_cli.date_of_creation);
info!(
"New actual config `{}` from CLI was pulled. Saving and restaring ...",
&config_from_cli.date_of_creation
);
let _ = to_local_tx.send(true);
return Some(config_from_cli)
},
return Some(config_from_cli);
}
_ => return None,
}
}
}
sleep(Duration::from_millis(300)).await;
}
}
async fn export_saved_config_data_locally(
config_file_path: &PathBuf,
current_config: &Processes
config_file_path: &PathBuf,
current_config: &Processes,
) -> anyhow::Result<()> {
let mut file = File::create(config_file_path)?;
file.write_all(
serde_json::to_string_pretty(current_config)?.as_bytes()
)?;
file.write_all(serde_json::to_string_pretty(current_config)?.as_bytes())?;
Ok(())
// Ok(())
}
}
/// # Fn `load_processes`
/// ## for reading and parsing *local* storing config
///
/// ## for reading and parsing *local* storing config
///
/// *input* : `&str`
///
/// *output* : `None` if local conf file doesn't exist or invalid | `Some(conf)` on finish reading and parsing
/// *output* : `None` if local conf file doesn't exist or invalid | `Some(conf)` on finish reading and parsing
///
/// *initiator* : func `get_actual_config`
///
/// *managing* : conf file name in `&str` format
/// *managing* : conf file name in `&str` format
///
/// *depends on* : struct `Processes`
///
///
fn load_processes(json_filename: &str) -> Option<Processes> {
if let Ok(res) = fs::read_to_string(json_filename) {
if let Ok(conf) = serde_json::from_str::<Processes>(&res) {
@ -415,7 +406,7 @@ fn load_processes(json_filename: &str) -> Option<Processes> {
/// # Fn `restart_main_thread`
/// ## for restart monitor with new config
///
///
/// *input* : -
///
/// *output* : `Ok(())` on valid restart | `Err(er)` on error
@ -425,7 +416,7 @@ fn load_processes(json_filename: &str) -> Option<Processes> {
/// *managing* : -
///
/// *depends on* : -
///
///
fn restart_main_thread() -> std::io::Result<()> {
let current_exe = env::current_exe()?;
let _ = Command::new(current_exe).exec();
@ -434,7 +425,7 @@ fn restart_main_thread() -> std::io::Result<()> {
/// # Fn `config_comparing`
/// ## for compare old and new configs
///
///
/// *input* : local: `&Processes`, remote: `&Processes`
///
/// *output* : `ConfigActuality::Local` or `ConfigActuality::Remote`
@ -444,7 +435,7 @@ fn restart_main_thread() -> std::io::Result<()> {
/// *managing* : two objects `&Processes`
///
/// *depends on* : `Processes`, `ConfigActuality`
///
///
pub fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActuality {
if local.is_default() {
return ConfigActuality::Remote;
@ -460,7 +451,7 @@ pub fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActualit
/// # Fn `save_new_config`
/// ## mechanism for saving new config in local storage
///
///
/// *input* : `&Processes`, `&str`
///
/// *output* : `Ok(())` on succesfull saving | Err(er) on fs error
@ -470,7 +461,7 @@ pub fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActualit
/// *managing* : new config object: `&Processes` and config file name: `&str`
///
/// *depends on* : `Processes`
///
///
fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomError> {
match serde_json::to_string_pretty(&config) {
// Ok(st) => match fs::write(config_file, st) {
@ -501,7 +492,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr
/// # Fn `parse_extern_config`
/// ## for parsing &str to Processes
///
///
/// *input* : `&str`
///
/// *output* : parsed config in Some(Processes) | None on error with parsing
@ -511,7 +502,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr
/// *managing* : unparsed config `&str`
///
/// *depends on* : `Processes`
///
///
fn parse_extern_config(json_string: &str) -> Option<Processes> {
if let Ok(des) = serde_json::from_str::<Processes>(json_string) {
return Some(des);

View File

@ -15,7 +15,7 @@ use crate::utils::get_container_id;
/// # Fn `setup_logger`
/// ## for initializing process of unstoppable grubbing metrics.
///
///
/// *input* : `Result<()>`
///
/// *output* : `Err` if it cant create logger | `Ok` after logger initialing
@ -25,7 +25,7 @@ use crate::utils::get_container_id;
/// *managing* : -
///
/// *depends on* : -
///
///
pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> {
// if Command::new("sh").args(["-c", "mkdir logs"]).output().is_err() {
// println!("Error: Cannot init logs directory");
@ -65,19 +65,17 @@ trait FromEnv {
impl FromEnv for LevelFilter {
fn from_env() -> LevelFilter {
return match std::env::var("NOXIS_MAX_LOG_LEVEL") {
Ok(var) => {
match var.to_ascii_lowercase().trim().as_ref() {
"trace" => LevelFilter::Trace,
"debug" => LevelFilter::Debug,
"info" => LevelFilter::Info,
"error" => LevelFilter::Error,
"warn" => LevelFilter::Warn,
"off" => LevelFilter::Off,
_ => LevelFilter::Info,
}
Ok(var) => match var.to_ascii_lowercase().trim().as_ref() {
"trace" => LevelFilter::Trace,
"debug" => LevelFilter::Debug,
"info" => LevelFilter::Info,
"error" => LevelFilter::Error,
"warn" => LevelFilter::Warn,
"off" => LevelFilter::Off,
_ => LevelFilter::Info,
},
Err(_) => LevelFilter::Info,
}
};
}
}

View File

@ -1,25 +1,25 @@
//!
//!
//! Module to handle `pre-boot params` of the monitor (calling also as `settings`)
//!
//!
#[allow(unused_imports)]
use anyhow::{Result, Error};
use log::warn;
use std::path::PathBuf;
use std::env::var;
use anyhow::{Error, Result};
use dotenv::dotenv;
use log::warn;
use std::env::var;
use std::path::PathBuf;
/// # Enum `MetricsPrebootParams`
/// ## for setting up metrics mode as preboot param from command prompt
///
/// ## for setting up metrics mode as preboot param from command prompt
///
/// examples:
/// ``` bash
/// noxis-rs ... --metrics full
/// noxis-rs ... --metrics system
/// noxis-rs ... --metrics full
/// noxis-rs ... --metrics system
/// noxis-rs ... --metrics processes
/// noxis-rs ... --metrics net
/// noxis-rs ... --metrics none
/// noxis-rs ... --metrics none
/// ```
///
///
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub enum MetricsPrebootParams {
Full,
@ -43,9 +43,9 @@ impl MetricsPrebootParams {
}
/// # `std::fmt::Display` implementation for `MetricsPrebootParams`
/// ## to enable parsing object to String
impl std::fmt::Display for MetricsPrebootParams {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
/// ## to enable parsing object to String
impl std::fmt::Display for MetricsPrebootParams {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
MetricsPrebootParams::Full => write!(f, "full"),
MetricsPrebootParams::System => write!(f, "system"),
@ -53,25 +53,25 @@ impl std::fmt::Display for MetricsPrebootParams {
MetricsPrebootParams::Net => write!(f, "net"),
MetricsPrebootParams::None => write!(f, "none"),
}
}
}
}
}
/// struct to handle Noxis settings (that were set as `.env` or global env vars)
///
///
/// to parse and set up all modes as preboot params from `.env` or using `export $VAR $VAL` command
///
///
/// # Settings :
///
///
/// All settings are divided by `actions` and `values`.
/// 1. `Actions` - true or false on var exist check
/// 1. `Actions` - true or false on var exist check
/// > `Actions` is not about values, it can handle any data
///
///
/// 2. `Values` - classic string-driven environment variables
///
///
/// # `Actions` vars
///
///
/// 1. `NOXIS_NO_HAGENT` - to disable hagent work module and set up work mode as autonomous
///
///
/// **usage** :
/// ``` toml
/// ...
@ -82,8 +82,8 @@ impl std::fmt::Display for MetricsPrebootParams {
/// ``` bash
/// export NOXIS_NO_HAGENT "random-text"
/// ```
///
///
///
///
/// 2. `NOXIS_NO_LOGS` - to disable logging at all
/// **usage** :
/// ``` toml
@ -95,9 +95,9 @@ impl std::fmt::Display for MetricsPrebootParams {
/// ``` bash
/// export NOXIS_NO_LOGS "random-text"
/// ```
///
/// 3. `NOXIS_REFRESH_LOGS` - to truncate logs directory
///
///
/// 3. `NOXIS_REFRESH_LOGS` - to truncate logs directory
///
/// ### usage :
/// ``` toml
/// ...
@ -108,9 +108,9 @@ impl std::fmt::Display for MetricsPrebootParams {
/// ``` bash
/// export NOXIS_REFRESH_LOGS "random-text"
/// ```
///
///
/// 4. `NOXIS_NO_SUB` - to disable Redis subscribtion mechanism
///
///
/// ### usage :
/// ``` toml
/// NOXIS_NO_SUB = "random-text"
@ -119,76 +119,76 @@ impl std::fmt::Display for MetricsPrebootParams {
/// ``` bash
/// export NOXIS_NO_SUB "random-text"
/// ```
///
///
/// # `Values` vars
///
///
/// 1. `NOXIS_HAGENT_SOCKET_PATH` - to set Unix Domain Socket file's directory
///
///
/// ### usage :
/// ``` toml
/// NOXIS_HAGENT_SOCKET_PATH = "/var/run/example/hostagent.sock"
/// ```
/// or
/// or
/// ``` bash
/// export NOXIS_HAGENT_SOCKET_PATH "/var/run/example/hostagent.sock"
/// ```
///
/// 2 `NOXIS_LOG_TO` - to set directory for logs
///
/// 2 `NOXIS_LOG_TO` - to set directory for logs
///
/// ### usage :
/// ``` toml
/// NOXIS_LOG_TO = "/var/log/noxis/noxis.log"
/// ```
/// or
/// or
/// ``` bash
/// export NOXIS_LOG_TO "/var/log/noxis/noxis.log"
/// ```
///
///
/// 3. `NOXIS_REMOTE_SERVER_URL` - to set Redis Server
///
///
/// ### usage :
/// ``` toml
/// NOXIS_REMOTE_SERVER_URL = "ip.ip.ip.ip:port"
/// ```
/// or
/// or
/// ``` bash
/// export NOXIS_REMOTE_SERVER_URL "ip.ip.ip.ip:port"
/// ```
///
///
/// 4. `NOXIS_CONFIG_PATH` - to set Noxis' config full path
///
/// ### usage :
/// ``` toml
/// NOXIS_CONFIG_PATH = "./settings.json"
/// ```
/// or
/// or
/// ``` bash
/// export NOXIS_CONFIG_PATH "./settings.json"
/// ```
///
/// 5. `NOXIS_METRICS_MODE` - to set metrics mode
///
///
/// 5. `NOXIS_METRICS_MODE` - to set metrics mode
///
/// ### usage :
/// ``` toml
/// NOXIS_METRICS_MODE = "full"
/// ```
/// or
/// or
/// ``` bash
/// export NOXIS_METRICS_MODE "full"
/// ```
///
///
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct PrebootParams {
// pub no_hostagent : bool,
pub no_logs: bool,
pub refresh_logs : bool,
pub no_sub : bool,
pub refresh_logs: bool,
pub no_sub: bool,
// pub socket_path : PathBuf,
pub log_to : PathBuf,
pub remote_server_url : String,
pub config : PathBuf,
pub log_to: PathBuf,
pub remote_server_url: String,
pub config: PathBuf,
pub metrics: MetricsPrebootParams,
pub self_socket : PathBuf,
pub self_socket: PathBuf,
}
/// # implementation for `MetricsPrebootParams`
@ -204,19 +204,19 @@ impl PrebootParams {
// Err(_) => false,
// }
// },
no_logs : {
no_logs: {
match var("NOXIS_NO_LOGS") {
Ok(_) => true,
Err(_) => false,
}
},
refresh_logs : {
refresh_logs: {
match var("NOXIS_REFRESH_LOGS") {
Ok(_) => true,
Err(_) => false,
}
},
no_sub : {
no_sub: {
match var("NOXIS_NO_SUB") {
Ok(_) => true,
Err(_) => false,
@ -229,45 +229,48 @@ impl PrebootParams {
// Err(_) => PathBuf::from("/var/run/enode/hostagent.sock"),
// }
// },
log_to : {
log_to: {
match var("NOXIS_LOG_TO") {
Ok(val) => PathBuf::from(val),
Err(_) => PathBuf::from("./"),
}
},
remote_server_url : {
remote_server_url: {
match var("NOXIS_REMOTE_SERVER_URL") {
Ok(val) => val,
Err(_) => String::from("localhost"),
}
},
config : {
config: {
match var("NOXIS_CONFIG_PATH") {
Ok(val) => PathBuf::from(val),
Err(_) => PathBuf::from("./settings.json"),
}
},
metrics : {
metrics: {
match var("NOXIS_METRICS_MODE") {
Ok(val) => MetricsPrebootParams::from_env(&val),
Err(_) => MetricsPrebootParams::Full,
}
},
self_socket : {
self_socket: {
match var("NOXIS_SOCKET_PATH") {
Ok(val) => PathBuf::from(val),
Err(_) => {
let default = std::env::current_dir().expect("Crushed on getting current_dir path. Check fs state!");
warn!("$NOXIS_SOCKET_PATH wans't set. Default value - {}", default.display());
let default = std::env::current_dir()
.expect("Crushed on getting current_dir path. Check fs state!");
warn!(
"$NOXIS_SOCKET_PATH wans't set. Default value - {}",
default.display()
);
PathBuf::from(default)
},
}
}
},
}
}
}
// unit tests of preboot params parsing mech
// #[cfg(test)]
// mod preboot_unitests{
@ -280,40 +283,40 @@ impl PrebootParams {
// #[test]
// fn parsing_hagent_valid_args() {
// assert!(PrebootParams::try_parse_from(vec![
// "runner-rs",
// "runner-rs",
// "--socket-path", "/path/to/socket"
// ]).is_ok())
// ]).is_ok())
// }
// #[test]
// fn parsing_hagent_invalid_args() {
// assert!(PrebootParams::try_parse_from(vec![
// "runner-rs",
// "--socket-path", "/path/to/socket",
// "runner-rs",
// "--socket-path", "/path/to/socket",
// "--no-hagent"
// ]).is_err())
// ]).is_err())
// }
// #[test]
// fn parsing_log_valid_args() {
// assert!(PrebootParams::try_parse_from(vec![
// "runner-rs",
// "runner-rs",
// "--log-to", "/path/to/log/dir"
// ]).is_ok())
// ]).is_ok())
// }
// #[test]
// fn parsing_log_invalid_args() {
// assert!(PrebootParams::try_parse_from(vec![
// "runner-rs",
// "--log-to /path/to/log/dir",
// "runner-rs",
// "--log-to /path/to/log/dir",
// "--no-logs"
// ]).is_err())
// }
// #[test]
// fn parsing_config_valid_args() {
// assert!(PrebootParams::try_parse_from(vec![
// "runner-rs",
// "--no-sub",
// "runner-rs",
// "--no-sub",
// "--remote-server-url", "redis://127.0.0.1"
// ]).is_err())
// ]).is_err())
// }
// // #[test]
// // fn parsing_config_invalid_args_noremote_nosub() {
@ -325,10 +328,10 @@ impl PrebootParams {
// #[test]
// fn parsing_config_invalid_args_noremote_remoteurl() {
// assert!(PrebootParams::try_parse_from(vec![
// "runner-rs",
// "runner-rs",
// "--no-sub",
// "--remote-server-url", "redis://127.0.0.1"
// ]).is_err())
// ]).is_err())
// }
// #[test]
// fn parsing_metrics_args_using_value_enum() {
@ -339,4 +342,4 @@ impl PrebootParams {
// assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "none"]).is_ok());
// assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "unusual_value"]).is_err());
// }
// }
// }

View File

@ -10,17 +10,17 @@ type SendersVec = Arc<Vec<Arc<mpsc::Sender<u8>>>>;
/// # Fn set_valid_destructor
/// ## for initializing process of unstoppable grubbing metrics.
///
///
/// *input* : `Result<()>`
///
/// *output* : `Err` if it cant create signals listeners | `Ok` on returning Monitor
/// *output* : `Err` if it cant create signals listeners | `Ok` on returning Monitor
///
/// *initiator* : main thread
///
/// *managing* : `Arc<Vec<Arc<mpsc::Sender<u8>>>>`
///
/// *depends on* : Sig, Signals
///
///
pub async fn set_valid_destructor(senders: SendersVec) -> anyhow::Result<()> {
let (mut int, mut term, mut stop) = (
Sig::new(Signals::Sigint, senders.clone()),
@ -37,9 +37,9 @@ pub async fn set_valid_destructor(senders: SendersVec) -> anyhow::Result<()> {
}
/// # Enum Signals
/// ## for instancing each managed system signals (such as SIGINT)
///
///
/// > (element needed in Sig constructor's signature)
///
///
/// *depends on* : -
enum Signals {
Sigint,
@ -49,9 +49,9 @@ enum Signals {
/// # Struct Signals
/// ## for instancing each managed system signals (such as SIGINT)
///
///
/// > (needed to construct system signals listener)
///
///
/// *depends on* : Signals
struct Sig {
signal: Signal,
@ -69,7 +69,7 @@ impl Sig {
}
}
/// ## trait Display realization for returning String-name of signal
///
///
/// > (needed in logs)
impl std::fmt::Display for Signals {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
@ -90,20 +90,20 @@ impl Signals {
}
}
}
/// # Trait SigPostProcessing
/// # Trait SigPostProcessing
/// ## to handle post-processing jobs after getting system signal
///
///
/// ## > (needed in signals post-processing)
///
///
trait SigPostProcessing {
async fn post_processing(&mut self) -> io::Result<()>;
}
/// # Trait SigPostProcessing realization for Sig struct
/// # Trait SigPostProcessing realization for Sig struct
/// ## to deinitialize Monitor correctly after getting signal
///
///
/// ## > (needed in signals post-processing)
///
///
impl SigPostProcessing for Sig {
async fn post_processing(&mut self) -> io::Result<()> {
// manipulations ...

View File

@ -1,24 +1,32 @@
#![allow(dead_code)]
use std::net::Ipv4Addr;
use serde::{Deserialize, Serialize};
use async_trait::async_trait;
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::net::Ipv4Addr;
use std::sync::Arc;
pub mod bus {
use std::fmt::Debug;
use super::*;
use noxis_cli::{Cli, metrics_models::MetricsMode};
use crate::utils::metrics::processes::{ProcessesAll, ProcessesGeneral, ProcessesQuery};
use crate::utils::metrics::MetricsExportable;
use noxis_cli::{metrics_models::MetricsMode, Cli};
pub type BusMessageContent = Box<dyn BusContent>;
#[derive(Debug)]
pub enum BusMessage {
Request(BusMessageDirection, BusMessageContentType, BusMessageContent),
Response(BusMessageDirection, BusMessageContentType, BusMessageContent),
Request(
BusMessageDirection,
BusMessageContentType,
BusMessageContent,
),
Response(
BusMessageDirection,
BusMessageContentType,
BusMessageContent,
),
}
#[derive(Debug)]
@ -35,21 +43,22 @@ pub mod bus {
MetricsObj,
Result,
MetricsModeTransfered,
ProcessQuery,
}
#[derive(Debug)]
pub enum CLiCommand {
Start,
Start,
Stop,
Restart,
Freeze,
Unfreeze
}
Freeze,
Unfreeze,
}
#[derive(Debug)]
pub struct InternalCli {
pub prc : String,
pub cmd : CLiCommand,
pub prc: String,
pub cmd: CLiCommand,
}
pub trait BusContent: Send + Sync + 'static + Debug + Any {
@ -90,19 +99,33 @@ pub mod bus {
BusMessageContentType::MetricsModeTransfered
}
}
impl BusContent for ProcessesQuery {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::ProcessQuery
}
}
}
#[derive(Debug)]
pub enum DependencyType {
File,
File,
Service,
}
#[derive(Debug)]
#[derive(Debug, Serialize, Clone, Copy)]
pub enum ServiceState {
Ok,
Unavailable
Unavailable,
}
impl std::fmt::Display for ServiceState {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
return match self {
ServiceState::Ok => write!(f, "Ok"),
ServiceState::Unavailable => write!(f, "Unavailable"),
};
}
}
pub struct ServiceWaitConfig(u32);
impl Default for ServiceWaitConfig {
@ -112,7 +135,7 @@ impl Default for ServiceWaitConfig {
}
pub enum FileTriggerType {
OnChange,
OnChange,
OnDelete,
}
@ -121,48 +144,87 @@ impl std::fmt::Display for FileTriggerType {
return match self {
FileTriggerType::OnChange => write!(f, "File was changed"),
FileTriggerType::OnDelete => write!(f, "File was moved or deleted"),
}
};
}
}
impl<'a> FileTriggerType {
pub fn event(&self, file_name: Arc<str>, trigger: Arc<str>) -> Events {
return match self {
FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger)),
FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger)),
}
FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(
file_name,
DependencyType::File,
trigger,
)),
FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(
file_name,
DependencyType::File,
trigger,
)),
};
}
pub fn event_from_file_trigger_controller(&self, file_name: Arc<str>, trigger: &FileTriggersForController) -> Events {
pub fn event_from_file_trigger_controller(
&self,
file_name: Arc<str>,
trigger: &FileTriggersForController,
) -> Events {
return match self {
FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_change.clone())),
FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger.on_delete.clone())),
}
FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(
file_name,
DependencyType::File,
trigger.on_change.clone(),
)),
FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(
file_name,
DependencyType::File,
trigger.on_delete.clone(),
)),
};
}
}
#[derive(Debug)]
pub enum Triggers {
File { on_change: Arc<str>, on_delete: Arc<str> },
Service {on_lost: Arc<str>, wait: u32},
File {
on_change: Arc<str>,
on_delete: Arc<str>,
},
Service {
on_lost: Arc<str>,
wait: u32,
},
}
impl Triggers {
pub fn new_file(on_change: Arc<str>, on_delete: Arc<str>) -> Triggers {
Triggers::File { on_change, on_delete }
Triggers::File {
on_change,
on_delete,
}
}
pub fn new_service(on_lost: Arc<str>, wait_time: u32) -> Triggers {
Triggers::Service{on_lost, wait: wait_time}
Triggers::Service {
on_lost,
wait: wait_time,
}
}
pub fn to_service_negative_event(&self, service_name: Arc<str>) -> Option<Events> {
if let Triggers::Service { on_lost, .. } = self {
return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable(service_name, DependencyType::Service, on_lost.clone())))
return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable(
service_name,
DependencyType::Service,
on_lost.clone(),
)));
}
None
}
}
#[derive(Debug)]
pub struct FileTriggersForController{ pub on_change: Arc<str>, pub on_delete: Arc<str> }
pub struct FileTriggersForController {
pub on_change: Arc<str>,
pub on_delete: Arc<str>,
}
pub struct ServiceTriggersForController(Arc<str>);
impl std::fmt::Display for DependencyType {
@ -170,13 +232,13 @@ impl std::fmt::Display for DependencyType {
return match self {
DependencyType::File => write!(f, "File"),
DependencyType::Service => write!(f, "Service"),
}
};
}
}
#[derive(Debug)]
#[derive(Debug, serde::Serialize, Clone, Copy)]
pub enum ProcessState {
Pending,
Pending,
Holding,
Stopped,
StoppedByCli,
@ -186,19 +248,18 @@ impl std::fmt::Display for ProcessState {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
return match self {
ProcessState::Pending => write!(f, "Running"),
ProcessState::Holding => write!(f, "Holding"),
ProcessState::Holding => write!(f, "Frozen"),
ProcessState::Stopped => write!(f, "Stopped"),
ProcessState::StoppedByCli => write!(f, "Forcibly stopped"),
ProcessState::HoldingByCli => write!(f, "Forcibly holding"),
}
ProcessState::StoppedByCli => write!(f, "Stopped by Admin"),
ProcessState::HoldingByCli => write!(f, "Frozen by Admin"),
};
}
}
#[derive(Debug)]
pub enum Events {
Positive(Arc<str>),
Negative(NegativeOutcomes)
Negative(NegativeOutcomes),
}
#[derive(Debug)]
pub enum NegativeOutcomes {
@ -224,11 +285,11 @@ pub enum ConfigActuality {
/// # Struct for the 1st level in json conf file
/// ## for storing main config data
///
///
/// > (needed in serialization and deserialization)
///
///
/// *depends on* : `TrackingProcess`
///
///
/// ``` json
/// {
/// -> "dateOfCreation": "1721381809104",
@ -249,8 +310,8 @@ pub struct Processes {
impl Default for Processes {
fn default() -> Self {
Self {
date_of_creation : String::new(),
processes : Vec::new(),
date_of_creation: String::new(),
processes: Vec::new(),
}
}
}
@ -266,11 +327,11 @@ impl Processes {
/// # Struct for the 2nd level in json conf file
/// ## for each process to contain info, such as name, path and dependencies
///
///
/// > (needed in serialization and deserialization)
///
///
/// *depends on* : `Dependencies`
///
///
/// ``` json
/// ...
/// "processes": [
@ -291,11 +352,11 @@ pub struct TrackingProcess {
/// # Struct for the 3d level in json conf file
/// ## for processes' dependencies including files and services
///
///
/// > (needed in serialization and deserialization)
///
///
/// *depends on* : `Files`, `Services`
///
///
/// ``` json
/// ...
/// "path": "/home/user/monitor/runner-rs/temp-process",
@ -315,11 +376,11 @@ pub struct Dependencies {
/// # Struct for the 4th level in json conf file
/// ## for containing file object with its triggers to manipulate in daemons
///
///
/// > (needed in serialization and deserialization)
///
///
/// *depends on* : `FileTriggers`
///
///
/// ``` json
/// ...
/// "files": [
@ -327,7 +388,7 @@ pub struct Dependencies {
/// -> "filename": "dep-file",
/// -> "src": "/home/user/monitor/runner-rs/tests/examples/",
/// -> "triggers": { ... }
/// -> } ,
/// -> } ,
/// ...
/// ], ...
/// ```
@ -340,11 +401,11 @@ pub struct Files {
/// # Struct for the 4th level in json conf file
/// ## for containing service object with its triggers to manipulate in daemons
///
///
/// > (needed in serialization and deserialization)
///
///
/// *depends on* : `ServiceTriggers`
///
///
/// ``` json
/// ...
/// "services": [
@ -352,7 +413,7 @@ pub struct Files {
/// -> "hostname" : "ya.ru",
/// -> "port" : 443,
/// -> "triggers": { ... }
/// -> } ,
/// -> } ,
/// ...
/// ], ...
/// ```
@ -365,11 +426,11 @@ pub struct Services {
/// # Struct for the 5th level in json conf file
/// ## for instancing each service's policies such as on lost or time to wait till reachable
///
///
/// > (needed in serialization and deserialization)
///
///
/// *depends on* : -
///
///
/// ``` json
/// ...
/// "port": 443,
@ -389,11 +450,11 @@ pub struct ServiceTriggers {
/// # Struct for the 5th level in json conf file
/// ## for instancing each file's policies such as on-delete or onupdate events
///
///
/// > (needed in serialization and deserialization)
///
///
/// *depends on* : -
///
///
/// ``` json
/// ...
/// "src": "/home/user/monitor/runner-rs/tests/examples/",
@ -413,102 +474,101 @@ pub struct FileTriggers {
/// # Metrics struct
/// ## for gathering all system metrics (from container + each process)
///
///
/// > (needed in hagent communication, `?...?`)
///
///
/// *depends on* : `ContainerMetrics`, `ProcessMetrics`
///
#[derive(Debug, Clone, Serialize,)]
///
#[derive(Debug, Clone, Serialize)]
pub struct Metrics {
pub container_metrics : ContainerMetrics,
pub processes_metrics : Vec<ProcessMetrics>,
pub container_metrics: ContainerMetrics,
pub processes_metrics: Vec<ProcessMetrics>,
// pub net_metrics : Vec<PacketInfo>,
}
/// ## Metrics struct's constructor
/// ## Metrics struct's constructor
impl Metrics {
pub fn new(cm: ContainerMetrics, prm: Vec<ProcessMetrics>) -> Self {
Metrics {
container_metrics : cm,
processes_metrics : prm,
container_metrics: cm,
processes_metrics: prm,
// net_metrics : net,
}
}
}
/// # Container metrics struct
/// ## for gathering all container metrics
///
///
/// > (needed in gathering metrics)
///
///
/// *depends on* : -
///
///
#[derive(Debug, Clone, Serialize)]
pub struct ContainerMetrics {
container_id : String,
cpu_load : f32,
ram_load : f32,
container_id: String,
cpu_load: f32,
ram_load: f32,
// pub net_activity : ???
processes : Vec<String>,
processes: Vec<String>,
}
/// ## Container struct's constructor
/// ## Container struct's constructor
impl ContainerMetrics {
pub fn new(container_id : &str, cpu: f32, ram: f32, subsystems: Vec<String>,) -> Self{
pub fn new(container_id: &str, cpu: f32, ram: f32, subsystems: Vec<String>) -> Self {
ContainerMetrics {
container_id : String::from(container_id),
cpu_load : cpu,
ram_load : ram,
processes : subsystems,
container_id: String::from(container_id),
cpu_load: cpu,
ram_load: ram,
processes: subsystems,
}
}
}
/// # Process metrics struct
/// ## for gathering each process's all metrics
///
///
/// > (needed in gathering metrics)
///
///
/// *depends on* : -
///
///
#[derive(Debug, Clone, Serialize)]
pub struct ProcessMetrics {
pub process_name : String,
cpu_load : f32,
ram_load : f32,
pub process_name: String,
cpu_load: f32,
ram_load: f32,
}
/// ## Process struct's constructor
/// ## Process struct's constructor
impl ProcessMetrics {
pub fn new(process_name :&str, cpu: f32, ram: f32) -> Self {
pub fn new(process_name: &str, cpu: f32, ram: f32) -> Self {
ProcessMetrics {
process_name : String::from(process_name),
cpu_load : cpu,
ram_load : ram,
process_name: String::from(process_name),
cpu_load: cpu,
ram_load: ram,
}
}
}
/// # Packet info struct
/// ## for gathering info about container's net activity
///
///
/// > (needed in gathering metrics)
///
///
/// *depends on* : -
///
///
#[derive(Debug, Clone, Serialize)]
pub struct PacketInfo {
protocol : String,
dst_ip : Ipv4Addr,
src_ip : Ipv4Addr,
size : usize,
protocol: String,
dst_ip: Ipv4Addr,
src_ip: Ipv4Addr,
size: usize,
}
/// ## PacketInfo's constructor
/// ## PacketInfo's constructor
impl PacketInfo {
pub fn new(prt: String, dest: Ipv4Addr, src: Ipv4Addr, size_of_packet: usize) -> Self {
PacketInfo {
protocol : prt,
dst_ip : dest,
src_ip : src,
size : size_of_packet,
protocol: prt,
dst_ip: dest,
src_ip: src,
size: size_of_packet,
}
}
}
}

View File

@ -1,34 +1,40 @@
pub mod bus;
pub mod files;
pub mod hagent;
pub mod metrics;
pub mod prcs;
pub mod services;
pub mod bus;
use crate::options::structs::bus::{BusMessage, BusMessageContentType, InternalCli};
use crate::options::structs::Processes;
use async_trait::async_trait;
use files::v2::FilesController;
use lazy_static::lazy_static;
use log::{error, info};
use prcs::v2::ProcessesController;
use services::v2::ServicesController;
use std::process::Command;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::time::Duration;
use prcs::v2::ProcessesController;
use files::v2::FilesController;
use services::v2::ServicesController;
use async_trait::async_trait;
use lazy_static::lazy_static;
use crate::options::structs::bus::{BusMessage, InternalCli, BusMessageContentType};
lazy_static! {
static ref GET_ID_CMD : &'static str = "hostname";
static ref GET_ID_CMD: &'static str = "hostname";
}
// const GET_ID_CMD: &str = "hostname";
pub mod v2 {
use std::collections::{BTreeMap, HashMap, LinkedList, VecDeque};
use crate::options::structs::{bus::CLiCommand, Events, FileTriggersForController, ProcessUnit, Triggers};
use super::*;
use crate::utils::metrics::processes::{ProcessesAll, ProcessesQuery};
use crate::{
options::structs::{
bus::CLiCommand, Events, FileTriggersForController, ProcessUnit, Triggers,
},
utils::metrics::processes::deps::{Dependencies, FilesExtended, ServicesExtended},
};
use std::any::Any;
use std::collections::{BTreeMap, HashMap, LinkedList, VecDeque};
type BusReciever = tokio::sync::mpsc::Receiver<BusMessage>;
type BusSender = Arc<tokio::sync::mpsc::Sender<BusMessage>>;
@ -42,21 +48,26 @@ pub mod v2 {
#[derive(Debug)]
struct Supervisor {
prcs : LinkedList<ProcessesController>,
files : LinkedList<FilesController>,
services : LinkedList<ServicesController>,
config : Arc<Processes>,
bus : (BusReciever, BusSender),
prcs: LinkedList<ProcessesController>,
files: LinkedList<FilesController>,
services: LinkedList<ServicesController>,
config: Arc<Processes>,
bus: (BusReciever, BusSender),
}
impl Supervisor {
pub fn new(bus_reciever : BusReciever, bus_sender: BusSender) -> Supervisor {
Supervisor { prcs: LinkedList::new(), files: LinkedList::new(), services: LinkedList::new(), config: Arc::new(Processes::default()), bus : (bus_reciever, bus_sender) }
pub fn new(bus_reciever: BusReciever, bus_sender: BusSender) -> Supervisor {
Supervisor {
prcs: LinkedList::new(),
files: LinkedList::new(),
services: LinkedList::new(),
config: Arc::new(Processes::default()),
bus: (bus_reciever, bus_sender),
}
}
pub async fn with_config(mut self, config: Processes) -> Supervisor {
self.config = Arc::from(config);
let _ = self.config.processes.iter()
.for_each(|prc| {
let _ = self.config.processes.iter().for_each(|prc| {
let (rx, tx) = mpsc::channel::<Events>(10);
let temp = ProcessesController::new(&prc.name, tx).with_exe(&prc.path);
if !self.prcs.contains(&temp) {
@ -65,15 +76,16 @@ pub mod v2 {
let rx = Arc::new(rx);
let proc_name: Arc<str> = Arc::from(prc.name.clone());
let _ = prc.dependencies.files.iter()
.for_each(|file| {
let _ = prc.dependencies.files.iter().for_each(|file| {
let mut hm = HashMap::new();
let triggers = FileTriggersForController { on_change: Arc::from(file.triggers.on_change.clone()), on_delete: Arc::from(file.triggers.on_delete.clone())};
let triggers = FileTriggersForController {
on_change: Arc::from(file.triggers.on_change.clone()),
on_delete: Arc::from(file.triggers.on_delete.clone()),
};
hm.insert(proc_name.clone(), (triggers, rx.clone()));
let tempfile = FilesController::new(&file.filename.as_str(), hm)
.with_path(&file.src);
let tempfile =
FilesController::new(&file.filename.as_str(), hm).with_path(&file.src);
if let Ok(file) = tempfile {
if let Some(current_file) = self.files.iter_mut().find(|a| &&file == a) {
@ -83,17 +95,15 @@ pub mod v2 {
}
}
});
// servs
let _ = prc.dependencies.services.iter()
.for_each(|serv| {
let access_url = ServicesController::get_access_url(&serv.hostname, serv.port.as_ref());
let _ = prc.dependencies.services.iter().for_each(|serv| {
let access_url =
ServicesController::get_access_url(&serv.hostname, serv.port.as_ref());
// preparations
let rx = rx.clone();
let serv_cont = ServicesController::new().with_access_name(
&serv.hostname,
&access_url
);
let serv_cont =
ServicesController::new().with_access_name(&serv.hostname, &access_url);
// triggers
let arc: Arc<str> = Arc::from(serv.triggers.on_lost.clone());
let triggers = Triggers::new_service(arc, serv.triggers.wait);
@ -105,12 +115,13 @@ pub mod v2 {
let mut vec: VecDeque<Arc<str>> = VecDeque::new();
vec.push_back(proc_name.clone());
// connection_queue
let mut connection_queue: BTreeMap<u32, VecDeque<Arc<str>>> = BTreeMap::new();
let mut connection_queue: BTreeMap<u32, VecDeque<Arc<str>>> =
BTreeMap::new();
connection_queue.insert(serv.triggers.wait, vec);
// event_reg
let mut hm = HashMap::new();
hm.insert(proc_name.clone(), (triggers, rx));
let serv_cont = serv_cont.with_params(connection_queue, hm);
self.services.push_back(serv_cont);
}
@ -119,96 +130,209 @@ pub mod v2 {
self
}
pub fn get_stats(&self) -> String {
format!("processes: {}, files: {}, services: {}", self.prcs.len(),self.files.len(), self.services.len())
format!(
"processes: {}, files: {}, services: {}",
self.prcs.len(),
self.files.len(),
self.services.len()
)
}
pub async fn extract_extended_procs(
config : Arc<Processes>,
prcs_list : &LinkedList<ProcessesController>,
files_list : &LinkedList<FilesController>,
servs_list : &LinkedList<ServicesController>,
) -> Vec<ProcessesAll> {
let mut procs = Vec::new();
for prc in config.processes.iter() {
if let Some(prc_cont) = prcs_list
.iter()
.find(|&prc_cont| prc.name == *prc_cont.name)
{
let mut vec_files = Vec::new();
let mut vec_services = Vec::new();
prc.dependencies
.files
.iter()
.map(|file| (file, format!("{}{}", file.filename, file.src)))
.for_each(|(file, code_name)| {
if let Some(file_cont) = files_list
.iter()
.find(|&file_cont| *file_cont.get_code_name() == code_name)
{
vec_files.push(FilesExtended {
name: file.filename.to_string(),
path: file.src.to_string(),
status: file_cont.get_state(),
triggers: file.triggers.to_owned(),
});
}
});
prc.dependencies
.services
.iter()
.map(|serv| {
(
serv,
format!("{}{}", serv.hostname, {
if let Some(port) = serv.port {
format!(":{}", port)
} else {
String::new()
}
}),
)
})
.for_each(|(serv, acces_url)| {
if let Some(serv_cont) = servs_list
.iter()
.find(|&serv_cont| *serv_cont.get_arc_access_url() == acces_url)
{
vec_services.push(ServicesExtended {
name: serv.hostname.to_owned(),
access_name: (*serv_cont.get_arc_access_url()).to_owned(),
status: serv_cont.get_state(),
triggers: serv.triggers.to_owned(),
});
}
});
procs.push(ProcessesAll {
name: prc_cont.name.clone().to_string(),
state: prc_cont.get_state(),
pid: prc_cont.get_pid(),
dependencies: Dependencies {
files: vec_files,
services: vec_services,
},
});
}
}
procs
}
}
#[async_trait]
impl ProcessUnit for Supervisor {
async fn process(&mut self) {
info!("Initializing monitoring ...");
loop {
//
//
let rec = &mut self.bus.0;
while let Ok(request) = rec.try_recv(){
while let Ok(request) = rec.try_recv() {
if let BusMessage::Request(_, _, cont) = request {
let cont: Box<dyn Any + Send> = cont;
if let Ok(cli) = cont.downcast::<InternalCli>() {
let mut count = 0;
let fut = (&mut self.prcs).into_iter()
.find(|prc| prc.name == Arc::from(cli.prc.as_ref()))
.map(|prc| async {
let count = &mut count;
*count += 1;
let res = match cli.cmd {
CLiCommand::Start => {
prc.start_by_user_call().await
},
CLiCommand::Stop => {
prc.stop_by_user_call().await
},
CLiCommand::Restart => {
prc.restart_by_user_call().await
},
CLiCommand::Freeze => {
prc.freeze_by_user_call().await
},
CLiCommand::Unfreeze => {
prc.unfreeze_by_user_call().await
},
};
let sender = self.bus.1.clone();
let resp_content = match res {
Ok(_) => Ok(format!("Ok on user call abour process {}", prc.name)),
Err(er) => Err(anyhow::Error::msg(format!("Error: User call for process {} failed : {}", prc.name, er))),
};
let _ = sender.send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::new(resp_content)
)).await;
1
});
if let Some(fut) = fut {
fut.await;
match cont.downcast::<InternalCli>() {
Ok(cli) => {
let mut count = 0;
let fut = (&mut self.prcs)
.into_iter()
.find(|prc| prc.name == Arc::from(cli.prc.as_ref()))
.map(|prc| async {
let count = &mut count;
*count += 1;
let res = match cli.cmd {
CLiCommand::Start => prc.start_by_user_call().await,
CLiCommand::Stop => prc.stop_by_user_call().await,
CLiCommand::Restart => prc.restart_by_user_call().await,
CLiCommand::Freeze => prc.freeze_by_user_call().await,
CLiCommand::Unfreeze => {
prc.unfreeze_by_user_call().await
}
};
let sender = self.bus.1.clone();
let resp_content = match res {
Ok(_) => Ok(format!(
"Ok on user call abour process {}",
prc.name
)),
Err(er) => Err(anyhow::Error::msg(format!(
"Error: User call for process {} failed : {}",
prc.name, er
))),
};
let _ = sender.send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::new(resp_content)
)).await;
1
});
if let Some(fut) = fut {
fut.await;
} else {
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToCli,
BusMessageContentType::RawString,
Box::new(
Err(anyhow::Error::msg(format!("No process named `{}` was found in controlled scope", cli.prc)))
)
)).await;
}
}
else {
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToCli,
BusMessageContentType::RawString,
Box::new(
Err(anyhow::Error::msg(format!("No process named `{}` was found in controlled scope", cli.prc)))
)
)).await;
Err(boxed) => {
if let Ok(query) = boxed.downcast::<ProcessesQuery>() {
match *query {
ProcessesQuery::QueryAll => {
let procs = Self::extract_extended_procs(
self.config.clone(),
&self.prcs,
&self.files,
&self.services,
).await;
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToMetrics,
BusMessageContentType::ProcessQuery,
Box::new(
ProcessesQuery::All(procs)
)
)).await;
}
ProcessesQuery::QueryGeneral => {
let mut vec = Vec::new();
for prc in &self.prcs {
vec.push(prc.get_general_info().await);
}
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToMetrics,
BusMessageContentType::ProcessQuery,
Box::new(
ProcessesQuery::General(vec)
)
)).await;
}
_ => {
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToCli,
BusMessageContentType::RawString,
Box::new(
Err(anyhow::Error::msg("Unknown request format was send to the Supervisor"))
)
)).await;
}
}
}
}
}
// TODO: GET PRCS METRICS DOWNCASTING
}
}
let mut tasks: Vec<tokio::task::JoinHandle<ControllerResult>> = vec![];
if let Some(mut val) = self.prcs.pop_front() {
tasks.push(
tokio::spawn( async move {
val.process().await;
ControllerResult::Process(Some(val))
})
);
tasks.push(tokio::spawn(async move {
val.process().await;
ControllerResult::Process(Some(val))
}));
}
if let Some(mut val) = self.files.pop_front() {
tasks.push(
tokio::spawn( async move {
val.process().await;
ControllerResult::File(Some(val))
})
);
tasks.push(tokio::spawn(async move {
val.process().await;
ControllerResult::File(Some(val))
}));
}
if let Some(mut val) = self.services.pop_front() {
tasks.push(
tokio::spawn( async move {
val.process().await;
ControllerResult::Service(Some(val))
})
);
tasks.push(tokio::spawn(async move {
val.process().await;
ControllerResult::Service(Some(val))
}));
}
for task in tasks {
match task.await {
@ -226,10 +350,12 @@ pub mod v2 {
pub async fn init_monitoring(
config: Processes,
bus_reciever : BusReciever,
bus_sender : BusSender,
) -> anyhow::Result<()> {
let mut supervisor = Supervisor::new(bus_reciever, bus_sender).with_config(config).await;
bus_reciever: BusReciever,
bus_sender: BusSender,
) -> anyhow::Result<()> {
let mut supervisor = Supervisor::new(bus_reciever, bus_sender)
.with_config(config)
.await;
info!("Monitoring: {} ", &supervisor.get_stats());
supervisor.process().await;
Ok(())
@ -239,7 +365,7 @@ pub mod v2 {
// todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}'
/// # Fn `get_container_id`
/// ## for getting container id used in logs
///
///
/// *input* : -
///
/// *output* : Some(String) if cont-id was grubbed | None - if not
@ -249,7 +375,7 @@ pub mod v2 {
/// *managing* : -
///
/// *depends on* : -
///
///
pub fn get_container_id() -> Option<String> {
match Command::new(*GET_ID_CMD).output() {
Ok(output) => {
@ -260,7 +386,7 @@ pub fn get_container_id() -> Option<String> {
if id.is_empty() {
return None;
}
Some(String::from_utf8_lossy(&output.stdout).to_string())
Some(String::from_utf8_lossy(&output.stdout).trim().to_string())
}
Err(_) => None,
}

View File

@ -1,29 +1,33 @@
use std::sync::Arc;
use crate::options::structs::bus::{BusMessageDirection, BusMessage};
use crate::options::structs::bus::{BusMessage, BusMessageDirection};
use crate::options::structs::ProcessUnit;
use tokio::sync::mpsc::{Sender, Receiver};
use log::{trace, debug, error};
use log::{debug, error, trace};
use tokio::sync::mpsc::{Receiver, Sender};
type Inner = Receiver<BusMessage>;
type Outter = Arc<Sender<BusMessage>>;
#[derive(Debug )]
#[derive(Debug)]
pub struct Highway {
to_cli : Outter,
to_supervisor : Outter,
to_metrics : Outter,
to_cli: Outter,
to_supervisor: Outter,
to_metrics: Outter,
}
impl Highway {
fn new(to_cli: Outter, to_supervisor: Outter, to_metrics: Outter) -> Self {
Self { to_cli, to_supervisor, to_metrics }
Self {
to_cli,
to_supervisor,
to_metrics,
}
}
async fn send(&self, msg: BusMessage) -> anyhow::Result<()> {
let dir = match &msg {
BusMessage::Request(dir, ..) | BusMessage::Response(dir, ..) => {
trace!("redirecting message to {:?} ...", dir);
dir
},
}
};
match dir {
BusMessageDirection::ToCli => self.send_cli(msg).await,
@ -46,13 +50,16 @@ impl Highway {
}
pub struct Bus {
inner : Inner,
highway : Highway,
inner: Inner,
highway: Highway,
}
impl Bus {
pub fn new(inner: Inner, to_cli: Outter, to_supervisor: Outter, to_metrics: Outter) -> Self {
Self { inner, highway: Highway::new(to_cli, to_supervisor, to_metrics) }
Self {
inner,
highway: Highway::new(to_cli, to_supervisor, to_metrics),
}
}
}
@ -64,13 +71,21 @@ impl ProcessUnit for Bus {
// debug!("new message to the Bus : {:?}", &content);
let msg = match content {
BusMessage::Request(direction, content_type, content) => {
trace!("bus has got a new Request with direction {:?} and type {:?}", &direction, &content_type);
trace!(
"bus has got a new Request with direction {:?} and type {:?}",
&direction,
&content_type
);
BusMessage::Request(direction, content_type, content)
},
}
BusMessage::Response(direction, content_type, content) => {
trace!("bus has got a new Response with direction {:?} and type {:?}", &direction, &content_type);
trace!(
"bus has got a new Response with direction {:?} and type {:?}",
&direction,
&content_type
);
BusMessage::Response(direction, content_type, content)
},
}
};
if let Err(er) = self.highway.send(msg).await {
error!("Cannot redirect message : {}", er);

View File

@ -1,216 +1,235 @@
use crate::options::structs::CustomError;
use crate::options::structs::Events;
use async_trait::async_trait;
use inotify::{EventMask, Inotify, WatchMask};
use std::path::Path;
use std::sync::Arc;
use tokio::sync::mpsc::Sender as Sender;
use crate::options::structs::Events;
use async_trait::async_trait;
use tokio::sync::mpsc::Sender;
pub mod v2 {
use log::{error, info, warn};
use crate::options::structs::{FileTriggerType, FileTriggersForController as Triggers, ProcessUnit};
use super::*;
use std::{collections::HashMap, path::Path};
use super::*;
use crate::options::structs::{
FileTriggerType, FileTriggersForController as Triggers, ProcessUnit,
};
use log::{error, info, warn};
use serde::Serialize;
use std::{collections::HashMap, path::Path};
type MpscSender = Arc<Sender<Events>>;
type EventHandlers = HashMap<Arc<str>, (Triggers, MpscSender)>;
type MpscSender = Arc<Sender<Events>>;
type EventHandlers = HashMap<Arc<str>, (Triggers, MpscSender)>;
#[derive(Debug)]
enum FileState {
Ok,
NotFound,
#[derive(Debug, Serialize, Clone, Copy)]
pub enum FileState {
Ok,
NotFound,
}
#[derive(Debug)]
pub struct FilesController {
name: Arc<str>,
path: String,
code_name: Arc<str>,
state: FileState,
watcher: Option<Inotify>,
triggers: EventHandlers,
}
impl PartialEq for FilesController {
fn eq(&self, other: &Self) -> bool {
self.code_name == other.code_name
}
}
#[derive(Debug)]
pub struct FilesController {
name : Arc<str>,
path : String,
code_name : Arc<str>,
state : FileState,
watcher : Option<Inotify>,
triggers : EventHandlers,
}
impl PartialEq for FilesController {
fn eq(&self, other: &Self) -> bool {
self.code_name == other.code_name
impl FilesController {
#[inline(always)]
pub fn new(name: &str, triggers: EventHandlers) -> FilesController {
let name: Arc<str> = Arc::from(name);
Self {
name: name.clone(),
path: String::new(),
state: FileState::Ok,
watcher: None,
triggers,
code_name: name.clone(),
}
}
impl FilesController {
#[inline(always)]
pub fn new(name: &str, triggers: EventHandlers) -> FilesController {
let name: Arc<str> = Arc::from(name);
Self {
name : name.clone(),
path : String::new(),
state : FileState::Ok,
watcher : None,
triggers,
code_name : name.clone(),
#[inline(always)]
pub fn with_path(mut self, path: impl AsRef<Path>) -> anyhow::Result<FilesController> {
self.path = path.as_ref().to_string_lossy().into_owned();
self.watcher = {
match create_watcher(&self.name, &self.path) {
Ok(val) => Some(val),
Err(er) => {
error!(
"Cannot create watcher for {} ({}) due to {}",
self.name, &self.path, er
);
return Err(er);
}
}
};
self.code_name = Arc::from(format!("{}{}", &self.path, &self.code_name));
Ok(self)
}
pub fn add_event(&mut self, file_controller: FilesController) {
for (k, v) in file_controller.triggers {
self.triggers.entry(k).or_insert(v);
}
#[inline(always)]
pub fn with_path(mut self, path: impl AsRef<Path>) -> anyhow::Result<FilesController> {
self.path = path.as_ref().to_string_lossy().into_owned();
self.watcher = {
match create_watcher(&self.name, &self.path) {
Ok(val) => Some(val),
Err(er) => {
error!("Cannot create watcher for {} ({}) due to {}", self.name, &self.path, er);
return Err(er)
}
}
async fn trigger_on(&mut self, trigger_type: Option<FileTriggerType>) {
for (prc_name, (triggers, channel)) in &self.triggers {
let msg = match &trigger_type {
None => Events::Positive(self.code_name.clone()),
Some(event) => {
info!(
"Event on file {} ({}) : {}. Notifying `{}` ...",
&self.name, &self.path, event, &prc_name
);
event.event_from_file_trigger_controller(self.code_name.clone(), &triggers)
}
};
self.code_name = Arc::from(format!("{}{}", &self.path, &self.code_name));
Ok(self)
}
pub fn add_event(&mut self, file_controller : FilesController) {
for (k, v) in file_controller.triggers {
self.triggers.entry(k).or_insert(v);
}
}
async fn trigger_on(&mut self, trigger_type: Option<FileTriggerType>) {
for (prc_name, (triggers, channel)) in &self.triggers {
let msg = match &trigger_type {
None => {
Events::Positive(self.code_name.clone())
},
Some(event) => {
info!("Event on file {} ({}) : {}. Notifying `{}` ...", &self.name, &self.path, event, &prc_name);
event.event_from_file_trigger_controller(self.code_name.clone(), &triggers)
},
};
let _ = channel.send(msg).await;
}
let _ = channel.send(msg).await;
}
}
#[async_trait]
impl ProcessUnit for FilesController {
async fn process(&mut self) {
if let Ok(_) = check_file(&self.name, &self.path).await {
if let FileState::NotFound = self.state {
info!("File {} ({}) was found in determined scope. Notifying ...", self.name, self.code_name);
self.state = FileState::Ok;
self.trigger_on(None).await;
}
match &mut self.watcher {
Some(notify) => {
let mut buffer = [0; 128];
if let Ok(notif_events) = notify.read_events(&mut buffer) {
let (need_to_recreate, was_modifired) = notif_events.fold((false, false), |(a, b), mask| {
pub fn get_state(&self) -> FileState {
self.state
}
pub fn get_code_name(&self) -> Arc<str> {
self.code_name.clone()
}
}
#[async_trait]
impl ProcessUnit for FilesController {
async fn process(&mut self) {
if let Ok(_) = check_file(&self.name, &self.path).await {
if let FileState::NotFound = self.state {
info!(
"File {} ({}) was found in determined scope. Notifying ...",
self.name, self.code_name
);
self.state = FileState::Ok;
self.trigger_on(None).await;
}
match &mut self.watcher {
Some(notify) => {
let mut buffer = [0; 128];
if let Ok(notif_events) = notify.read_events(&mut buffer) {
let (need_to_recreate, was_modifired) =
notif_events.fold((false, false), |(a, b), mask| {
(
a || mask.mask == EventMask::DELETE_SELF,
b || mask.mask == EventMask::MODIFY,
)
});
if let (recreate_watcher, true) = (need_to_recreate, was_modifired) {
warn!("File {} ({}) was changed", self.name, &self.path);
if recreate_watcher {
self.watcher = match create_watcher(&self.name, &self.path) {
Ok(notifier) => Some(notifier),
Err(er) => {
error!("Failed to recreate watcher for {} ({}) due to {}",
self.name,
&self.path,
er
);
None
},
if let (recreate_watcher, true) = (need_to_recreate, was_modifired) {
warn!("File {} ({}) was changed", self.name, &self.path);
if recreate_watcher {
self.watcher = match create_watcher(&self.name, &self.path) {
Ok(notifier) => Some(notifier),
Err(er) => {
error!(
"Failed to recreate watcher for {} ({}) due to {}",
self.name, &self.path, er
);
None
}
}
self.trigger_on(Some(FileTriggerType::OnChange)).await;
return;
}
self.trigger_on(Some(FileTriggerType::OnChange)).await;
return;
}
},
None => { /* DEAD END */},
}
}
} else {
if let FileState::Ok = self.state {
warn!("File {} ({}) was not found in determined scope", self.name, &self.path);
self.state = FileState::NotFound;
self.trigger_on(Some(FileTriggerType::OnDelete)).await;
}
return;
None => { /* DEAD END */ }
}
self.trigger_on(None).await;
} else {
if let FileState::Ok = self.state {
warn!(
"File {} ({}) was not found in determined scope",
self.name, &self.path
);
self.state = FileState::NotFound;
self.trigger_on(Some(FileTriggerType::OnDelete)).await;
}
return;
}
self.trigger_on(None).await;
}
}
}
/// # Fn `create_watcher`
/// ## for creating watcher on file's delete | update events
///
/// *input* : `&str`, `&str`
///
/// *output* : `Err` if it cant create file watcher | `Ok(watcher)` on successfull construction
///
/// *initiator* : fn `file_handler`, fn `utils::run_daemons`
///
/// *managing* : current file's name: &str, path in local storage to current file: &str
///
/// *depends on* : -
///
pub fn create_watcher(filename: &str, path: &str) -> anyhow::Result<Inotify> {
    // NOTE(review): path and filename are concatenated directly, with no
    // separator — callers are expected to pass a trailing-slash path
    // (check_file builds its path the same way).
    let target = format!("{}{}", path, filename);
    let watcher = Inotify::init()?;
    // Subscribe to every inotify event class on the target file.
    watcher.watches().add(&target, WatchMask::ALL_EVENTS)?;
    Ok(watcher)
}
/// # Fn `create_watcher`
/// ## for creating watcher on file's delete | update events
///
/// *input* : `&str`, `&str`
///
/// *output* : `Err` if it cant create file watcher | `Ok(watcher)` on successfull construction
///
/// *initiator* : fn `file_handler`, fn `utils::run_daemons`
///
/// *managing* : current file's name: &str, path in local storage to current file: &str
///
/// *depends on* : -
///
pub fn create_watcher(filename: &str, path: &str) -> anyhow::Result<Inotify> {
    // Direct concatenation: callers pass a trailing-slash `path`.
    let src = format!("{}{}", path, filename);
    let inotify: Inotify = Inotify::init()?;
    // Watch every inotify event class on the target file.
    inotify.watches().add(&src, WatchMask::ALL_EVENTS)?;
    Ok(inotify)
}
/// # Fn `check_file`
/// ## for checking existance of current file
///
/// *input* : `&str`, `&str`
///
/// *output* : `Ok(())` if file exists | `Err(_)` if not | panic on fs error
///
/// *initiator* : fn `file_handler`
///
/// *managing* : current file's name: `&str` and current file's path in local storage: `&str`
///
/// *depends on* : network activity
///
/// Checks (off the async runtime) whether `path` + `filename` exists.
///
/// Returns `Ok(())` when the file exists, `Err(CustomError::Fatal)` when it
/// does not; panics only if the blocking task itself panics or is cancelled.
///
/// Fix: the previous version wrapped both owned `String`s in `Arc` before
/// moving them into the closure — a single-owner value needs no refcounting,
/// so the `Arc` allocations were pure overhead.
pub async fn check_file(filename: &str, path: &str) -> Result<(), CustomError> {
    // Own the inputs so they can move into the blocking task.
    let name = filename.to_string();
    let dir = path.to_string();
    // Path::exists hits the filesystem, so run it on the blocking pool.
    tokio::task::spawn_blocking(move || {
        // Direct concatenation: callers pass a trailing-slash `dir`
        // (same convention as create_watcher).
        let full = format!("{}{}", dir, name);
        if Path::new(&full).exists() {
            Ok(())
        } else {
            Err(CustomError::Fatal)
        }
    })
    .await
    // JoinError only occurs on panic/cancellation of the task — treat as fatal.
    .unwrap_or_else(|_| {
        panic!("Corrupted while file check process");
    })
}
/// # Fn `check_file`
/// ## for checking existance of current file
///
/// *input* : `&str`, `&str`
///
/// *output* : `Ok(())` if file exists | `Err(_)` if not | panic on fs error
///
/// *initiator* : fn `file_handler`
///
/// *managing* : current file's name: `&str` and current file's path in local storage: `&str`
///
/// *depends on* : network activity
///
pub async fn check_file(filename: &str, path: &str) -> Result<(), CustomError> {
    // Owned copies so the values can move into the blocking closure.
    let arc_name = Arc::new(filename.to_string());
    let arc_path = Arc::new(path.to_string());
    // Filesystem access is blocking — keep it off the async worker threads.
    tokio::task::spawn_blocking(move || {
        // Direct concatenation: callers pass a trailing-slash path.
        let file_concat = format!("{}{}", arc_path, arc_name);
        let path = Path::new(&file_concat);
        if path.exists() {
            Ok(())
        } else {
            Err(CustomError::Fatal)
        }
    })
    .await
    // JoinError only occurs if the spawned task panicked or was cancelled.
    .unwrap_or_else(|_| {
        panic!("Corrupted while file check process");
    })
}
#[cfg(test)]
mod files_unittests {
use super::*;
#[tokio::test]
async fn try_to_create_watcher() {
let res = create_watcher("dep-file", "./tests/examples/");
assert!(res.is_ok());
}
#[tokio::test]
async fn try_to_create_invalid_watcher() {
let res = create_watcher("invalid-file", "/path/to/the/no/dir");
assert!(res.is_err());
}
#[tokio::test]
async fn check_existing_file() {
let res = check_file("dep-file", "./tests/examples/").await;
assert!(res.is_ok());
}
#[tokio::test]
async fn check_non_existing_file() {
let res = check_file("invalid-file", "/path/to/the/no/dir").await;
assert!(res.is_err());
}
#[cfg(test)]
mod files_unittests {
    use super::*;
    // Watcher creation succeeds for an existing fixture file.
    #[tokio::test]
    async fn try_to_create_watcher() {
        let res = create_watcher("dep-file", "./tests/examples/");
        assert!(res.is_ok());
    }
    // Watcher creation fails when the target path does not exist.
    #[tokio::test]
    async fn try_to_create_invalid_watcher() {
        let res = create_watcher("invalid-file", "/path/to/the/no/dir");
        assert!(res.is_err());
    }
    // check_file reports Ok for an existing fixture file.
    #[tokio::test]
    async fn check_existing_file() {
        let res = check_file("dep-file", "./tests/examples/").await;
        assert!(res.is_ok());
    }
    // check_file reports Err for a missing file.
    #[tokio::test]
    async fn check_non_existing_file() {
        let res = check_file("invalid-file", "/path/to/the/no/dir").await;
        assert!(res.is_err());
    }
}

View File

@ -1,15 +1,15 @@
//
//
// module needed to check host-agent health condition and to communicate with it
//
//
use anyhow::{Error, Ok, Result};
use tokio::{io::Interest, net::UnixStream};
use anyhow::{Ok, Result, Error};
// to kill lint bug
// to kill lint bug
#[allow(unused_imports)]
use tokio::net::UnixListener;
/// # Fn `open_unix_socket`
/// ## opening unix-socket for host-agent communication
///
/// ## opening unix-socket for host-agent communication
///
/// *input* : -
///
/// *output* : `Ok(socket)` if socket was successfully opened | `Err(er)` if not
@ -19,7 +19,7 @@ use tokio::net::UnixListener;
/// *managing* : -
///
/// *depends on* : -
///
///
#[allow(dead_code)]
async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error> {
// "/var/run/enode/hostagent.sock"
@ -27,18 +27,18 @@ async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error>
}
/// # Fn `ha_healthcheck`
/// ## for checking host-agent state
///
/// ## for checking host-agent state
///
/// *input* : `&UnixStream`
///
/// *output* : `Ok(()))` if host-agent is running | `Err(er)` if not
///
/// *initiator* : main thread `(??)`
///
/// *managing* : ref on unix-socket object
/// *managing* : ref on unix-socket object
///
/// *depends on* : -
///
///
#[allow(dead_code)]
async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
socket.ready(Interest::WRITABLE).await?;
@ -48,8 +48,8 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
}
/// # Fn `ha_healthcheck`
/// ## for sending data to host-agent using unix-socket
///
/// ## for sending data to host-agent using unix-socket
///
/// *input* : `&UnixStream`, `&str`
///
/// *output* : `Ok(()))` if data was sent| `Err(er)` if not
@ -59,9 +59,9 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
/// *managing* : socket: `&UnixStream`, data: `&str`
///
/// *depends on* : -
///
///
#[allow(dead_code)]
async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error > {
async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error> {
socket.ready(Interest::WRITABLE).await?;
socket.writable().await?;
socket.try_write(data.as_bytes())?;
@ -91,8 +91,8 @@ mod hagent_unittets {
// --Result<maybe Response>
// one-shot func
async fn hagent_communication_test() {
use crate::options::structs::{ProcessMetrics, ContainerMetrics, Metrics};
use crate::options::structs::{ContainerMetrics, Metrics, ProcessMetrics};
let procm = ProcessMetrics::new("test-prc", 15.0, 5.0);
let contm = ContainerMetrics::new("test", 32.0, 12.0, vec![procm.process_name.clone()]);
let metrics = Metrics::new(contm, vec![procm]);
@ -105,10 +105,11 @@ mod hagent_unittets {
let sock = sock.unwrap();
assert!(ha_healthcheck(&sock).await.is_ok());
assert!(ha_send_data(&sock, &metrics).await.is_ok());
}
#[tokio::test]
async fn open_unixsocket_test() {
assert!(open_unix_socket("non/valid/socket/file.sock").await.is_err());
assert!(open_unix_socket("non/valid/socket/file.sock")
.await
.is_err());
}
}
}

View File

@ -1,193 +1,325 @@
// submodule needed to get metrics such as
// cpu load, ram/rom load and net activity
use std::{any::Any, collections::BTreeMap, sync::Arc};
use crate::options::structs::{ProcessState, TrackingProcess};
use crate::{
options::structs::{ProcessState, TrackingProcess},
utils::metrics::processes::{ProcessesGeneral, ProcessesQuery},
};
use futures::lock::Mutex;
use log::warn;
use noxis_cli::metrics_models::MetricsMode;
use std::{any::Any, collections::BTreeMap, sync::Arc};
// use chrono::Duration;
use sysinfo::{System, Disks as DisksList, Networks};
use super::prcs::v2::Pid;
use crate::options::structs::bus::{BusMessage, BusMessageContentType, BusMessageDirection};
use crate::options::structs::Dependencies;
use serde::Serialize;
use super::prcs::v2::Pid;
use std::fmt::Debug;
use crate::options::structs::bus::{BusMessage, BusMessageDirection, BusMessageContentType};
use sysinfo::{Disks as DisksList, Networks, System};
// use noxis_cli::metrics_models::MetricsMode;
pub type MetricProcesses = Vec<ProcessExtended>;
pub type MetricProcesses = Vec<ProcessExtended>;
type CoreUsage = BTreeMap<usize, CoreInfo>;
type Disks = Vec<Disk>;
type Disks = Vec<Disk>;
type Ifaces = Vec<Network>;
type BusReciever = tokio::sync::mpsc::Receiver<BusMessage>;
type BusSender = Arc<tokio::sync::mpsc::Sender<BusMessage>>;
/// # Fn `init_metrics_grubber`
/// ## for initializing process of unstoppable grubbing metrics.
///
///
/// *input* : `Arc<Mutex<UnixSocket>>` ??
///
/// *output* : `Err` if it cant create grubbers | `Ok` on finish
///
/// *initiator* : main thread ??
/// *initiator* : main thread ??
///
/// *managing* : object of unix-socket reader
/// *managing* : object of unix-socket reader
///
/// *depends on* : -
///
///
pub async fn init_metrics_grubber(
/* BROADCSAT LISTENER TO GET `PROCESSES` OBJ */
bus_sender : BusSender,
bus_recirever : BusReciever,
) -> anyhow::Result<()> {
bus_sender: BusSender,
bus_reciever: BusReciever,
) -> anyhow::Result<()> {
let mut system = System::new();
let mut disks = DisksList::new_with_refreshed_list();
let mut networks = Networks::new_with_refreshed_list();
// get_all_metrics(&mut system).await;
/* TODO */
let mut bus_recirever = bus_recirever;
let mut bus_reciever = bus_reciever;
loop {
if let Ok(BusMessage::Request(_, _, cont)) = bus_recirever.try_recv() {
if let Ok(BusMessage::Request(_, _, cont)) = bus_reciever.try_recv() {
system.refresh_all();
disks.refresh_list();
networks.refresh_list();
let cont: Box<dyn Any + Send> = cont;
match cont.downcast::<MetricsMode>() {
Err(_) => {
warn!("Unrecognized Metric mode was given");
let _ = bus_sender.send(BusMessage::Response(
BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::new(
Err(anyhow::Error::msg(format!("Unrecognized Metric mode was given"))
let _ = bus_sender
.send(BusMessage::Response(
BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::new(Err(anyhow::Error::msg(format!(
"Unrecognized Metric mode was given"
)))),
))
)).await;
.await;
}
Ok(mode) => {
tokio::time::sleep(tokio::time::Duration::from_millis(200)).await;
let metric: Box<dyn MetricsExportable> = match *mode {
MetricsMode::Full => Box::new(get_all_metrics(&mut system).await),
MetricsMode::Full => {
let mut refs =
get_all_metrics(&mut system, bus_sender.clone(), &disks, &networks)
.await;
if let Some(prcs) = bus_reciever.recv().await {
if let BusMessage::Response(_, _, cont) = prcs {
let cont: Box<dyn Any> = cont;
if let Ok(cont) = cont.downcast::<ProcessesQuery>() {
if let ProcessesQuery::General(info) = *cont {
refs.processes = info;
}
}
}
}
Box::new(refs)
}
MetricsMode::Host => {
Box::new(get_global_host_info(&mut system, &disks, &networks).await)
}
MetricsMode::Cpu => Box::new(get_cpu_metrics(&mut system).await),
MetricsMode::Ram => Box::new(get_ram_metrics(&mut system).await),
MetricsMode::Rom => Box::new(get_all_disks_metrics().await),
MetricsMode::Network => Box::new(get_all_ifaces_metrics().await),
MetricsMode::Rom => Box::new(get_all_disks_metrics(&disks).await),
MetricsMode::Network => Box::new(get_all_ifaces_metrics(&networks).await),
// MetricsMode::Processes => {},
// TODO ->
_ => todo!(),
};
// let metric: Box<dyn BusContent> = Box::new(metric);
// let metric: Box<dyn BusContent> = Box::new(metric);
let metric = metric.serialze_into_output();
let _ = bus_sender.send(BusMessage::Response(
BusMessageDirection::ToCli,
BusMessageContentType::MetricsObj,
Box::new(metric)
)).await;
},
let _ = bus_sender
.send(BusMessage::Response(
BusMessageDirection::ToCli,
BusMessageContentType::MetricsObj,
Box::new(metric),
))
.await;
}
}
}
// TODO else if response in metrics
// else if let Response ....
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
}
}
async fn get_all_metrics(system: &mut System) -> FullMetrics {
system.refresh_all();
tokio::time::sleep(tokio::time::Duration::from_millis(200)).await;
async fn get_all_metrics(
system: &mut System,
sender: BusSender,
disks: &DisksList,
networks: &Networks,
) -> FullMetrics {
let host = get_host_info().await;
let cpu = get_cpu_metrics(system).await;
let ram = get_ram_metrics(system).await;
let disks = get_all_disks_metrics().await;
let ifaces = get_all_ifaces_metrics().await;
let prcs = MetricProcesses::new();
let disks = get_all_disks_metrics(&disks).await;
let ifaces = get_all_ifaces_metrics(&networks).await;
let prcs: Vec<ProcessesGeneral> = Vec::new();
let _ = sender
.send(BusMessage::Request(
BusMessageDirection::ToSupervisor,
BusMessageContentType::ProcessQuery,
Box::new(ProcessesQuery::QueryGeneral),
))
.await;
FullMetrics::create(host, cpu, ram, disks, ifaces, prcs)
}
/// Builds the `HostGeneral` summary (identity + aggregate CPU/RAM/disk/net
/// figures) from already-refreshed `sysinfo` handles.
///
/// Fix: the original divided by zero when the disk list was empty, when a
/// disk reported `total_space() == 0` (pseudo-filesystems), or when no
/// network interfaces were present (`total.len() * 2 == 0`). Those cases now
/// yield 0 instead of panicking.
async fn get_global_host_info(
    system: &mut System,
    disks: &DisksList,
    networks: &Networks,
) -> HostGeneral {
    // Mean percentage of *available* space across disks; disks reporting zero
    // total space are skipped to avoid division by zero.
    let disk_percentage = {
        let per_disk: Vec<u64> = disks
            .iter()
            .filter(|disk| disk.total_space() > 0)
            .map(|disk| disk.available_space() * 100 / disk.total_space())
            .collect();
        if per_disk.is_empty() {
            0
        } else {
            per_disk.iter().sum::<u64>() / (per_disk.len() as u64)
        }
    };
    // Mean of (rx + tx) per direction over all interfaces; 0 when none exist.
    let net_stat = {
        let per_iface: Vec<u64> = networks
            .iter()
            .map(|(_, iface_data)| iface_data.received() + iface_data.transmitted())
            .collect();
        if per_iface.is_empty() {
            0
        } else {
            per_iface.iter().sum::<u64>() / ((per_iface.len() * 2) as u64)
        }
    };
    HostGeneral {
        hostname: System::host_name().unwrap_or_default(),
        os: System::long_os_version().unwrap_or_default(),
        kernel: System::kernel_version().unwrap_or_default(),
        cpu_percentage: system.global_cpu_usage(),
        // NOTE(review): this is used memory (total - free), not "available"
        // in the MemAvailable sense — confirm the intended semantics.
        ram_available: system.total_memory() - system.free_memory(),
        disk_percentage,
        net_stat,
    }
}
async fn get_host_info() -> HostInfo {
HostInfo {
hostname : System::host_name().unwrap_or_default(),
os : System::long_os_version().unwrap_or_default(),
kernel : System::kernel_version().unwrap_or_default(),
hostname: System::host_name().unwrap_or_default(),
os: System::long_os_version().unwrap_or_default(),
kernel: System::kernel_version().unwrap_or_default(),
}
}
async fn get_cpu_metrics(system: &mut System) -> Cpu {
system.refresh_cpu_all();
tokio::time::sleep(tokio::time::Duration::from_millis(200)).await;
let mut buffer = CoreUsage::new();
let global_usage = system.global_cpu_usage();
system.cpus()
.iter()
.enumerate()
.for_each(|(id, cpu)| {
system.cpus().iter().enumerate().for_each(|(id, cpu)| {
let core_info = CoreInfo {
// id,
brand : cpu.brand().to_string(),
name : cpu.name().to_string(),
frequency : cpu.frequency(),
vendor_id : cpu.vendor_id().to_string(),
usage : cpu.cpu_usage(),
brand: cpu.brand().to_string(),
name: cpu.name().to_string(),
frequency: cpu.frequency(),
vendor_id: cpu.vendor_id().to_string(),
usage: cpu.cpu_usage(),
};
// buffer.push(core_info);
buffer.entry(id).or_insert(core_info);
});
Cpu {
global_usage,
usage: buffer
usage: buffer,
}
}
async fn get_ram_metrics(system: &mut System) -> Ram {
system.refresh_memory();
tokio::time::sleep(tokio::time::Duration::from_millis(200)).await;
Ram {
free_mem : system.free_memory(),
free_swap : system.free_swap(),
total_mem : system.total_memory(),
total_swap : system.total_swap(),
free_mem: system.free_memory(),
free_swap: system.free_swap(),
total_mem: system.total_memory(),
total_swap: system.total_swap(),
}
}
async fn get_all_disks_metrics() -> Disks {
let disks = DisksList::new_with_refreshed_list();
async fn get_all_disks_metrics(disks: &DisksList) -> Disks {
// let disks = DisksList::new_with_refreshed_list();
let mut buffer = Disks::new();
disks.list()
.iter()
.for_each(|disk| {
disks.list().iter().for_each(|disk| {
let disk = Disk {
name : disk.name().to_string_lossy().into_owned(),
name: disk.name().to_string_lossy().into_owned(),
kind: disk.kind().to_string(),
fs : disk.file_system().to_string_lossy().into_owned(),
mount_point : disk.mount_point().to_string_lossy().into_owned(),
total_space : disk.total_space(),
available_space : disk.available_space(),
is_removable : disk.is_removable(),
is_readonly : disk.is_read_only()
fs: disk.file_system().to_string_lossy().into_owned(),
mount_point: disk.mount_point().to_string_lossy().into_owned(),
total_space: disk.total_space(),
available_space: disk.available_space(),
is_removable: disk.is_removable(),
is_readonly: disk.is_read_only(),
};
buffer.push(disk);
});
buffer
}
async fn get_all_ifaces_metrics() -> Ifaces {
async fn get_all_ifaces_metrics(networks: &Networks) -> Ifaces {
let mut ifaces = Ifaces::new();
let networks = Networks::new_with_refreshed_list();
networks.iter()
.for_each(|(iface_name, data)| {
networks.iter().for_each(|(iface_name, data)| {
let mac = data.mac_address().to_string();
let ip_addrs = data
.ip_networks()
.iter()
.map(|ipaddr| format!("{}/{}", ipaddr.addr, ipaddr.prefix))
.collect::<Vec<String>>();
let iface = Network {
iname : iface_name.to_owned(),
mac : mac,
recieved : data.received(),
transmitted : data.transmitted(),
total_recieved_bytes : data.total_received(),
total_transmitted_bytes : data.total_transmitted(),
total_recieved_packets : data.total_packets_received(),
total_transmitted_packets : data.total_packets_transmitted(),
errors_on_recieved : data.errors_on_received(),
errors_on_transmitted : data.errors_on_transmitted(),
iname: iface_name.to_owned(),
mac: mac,
ip_addresses: ip_addrs,
recieved: data.received(),
transmitted: data.transmitted(),
total_recieved_bytes: data.total_received(),
total_transmitted_bytes: data.total_transmitted(),
total_recieved_packets: data.total_packets_received(),
total_transmitted_packets: data.total_packets_transmitted(),
errors_on_recieved: data.errors_on_received(),
errors_on_transmitted: data.errors_on_transmitted(),
};
ifaces.push(iface);
});
ifaces
}
async fn get_all_processes_metrics(system: &mut System) { /* TODO */}
async fn get_all_processes_metrics(system: &mut System) { /* TODO */
}
pub mod processes {
use crate::options::structs::ProcessState;
use crate::utils::prcs::v2::Pid;
/// Query/response payload exchanged over the bus for process information.
/// The `Query*` variants are requests; `General`/`All` carry the answers.
#[derive(Debug, serde::Serialize)]
pub enum ProcessesQuery {
/// Response: lightweight per-process snapshots.
General(Vec<ProcessesGeneral>),
/// Response: full per-process snapshots including dependencies.
All(Vec<ProcessesAll>),
/// Request for the `General` form.
QueryGeneral,
/// Request for the `All` form.
QueryAll,
}
impl ProcessesQuery {
/// Serializes a *response* variant to pretty-printed JSON for transport.
/// Returns `None` for the request variants (nothing to serialize) and on
/// serde failure.
pub fn serialze_to_bus(&self) -> Option<String> {
match self {
ProcessesQuery::General(prc) => serde_json::to_string_pretty(prc).ok(),
ProcessesQuery::All(prc) => serde_json::to_string_pretty(prc).ok(),
ProcessesQuery::QueryGeneral => None,
ProcessesQuery::QueryAll => None,
}
}
}
/// Minimal process descriptor: identity plus supervisor-tracked state.
#[derive(Debug, serde::Serialize)]
pub struct ProcessesGeneral {
pub name: String,
pub state: ProcessState,
pub pid: Pid,
}
/// Extended process descriptor: identity, state and the file/service
/// dependencies the supervisor watches for this process.
#[derive(Debug, serde::Serialize)]
pub struct ProcessesAll {
pub name: String,
pub state: ProcessState,
pub pid: Pid,
pub dependencies: deps::Dependencies,
}
/// Serializable views of a process's watched dependencies.
pub mod deps {
use crate::options::structs::{FileTriggers, ServiceState, ServiceTriggers};
use crate::utils::files::v2::FileState;
// use super::*;
/// A watched file: location, last observed state and configured triggers.
#[derive(Debug, serde::Serialize)]
pub struct FilesExtended {
pub name: String,
pub path: String,
pub status: FileState,
pub triggers: FileTriggers,
}
/// A watched service: names, last observed state and configured triggers.
#[derive(Debug, serde::Serialize)]
pub struct ServicesExtended {
pub name: String,
pub access_name: String,
pub status: ServiceState,
pub triggers: ServiceTriggers,
}
/// All dependencies attached to one process.
#[derive(Debug, serde::Serialize)]
pub struct Dependencies {
pub files: Vec<FilesExtended>,
pub services: Vec<ServicesExtended>,
}
}
}
pub enum MetricType {
FullMetrics,
@ -200,46 +332,42 @@ pub enum MetricType {
}
pub trait MetricsExportable: Send + Sync + 'static + Debug + Any {
fn get_metric_type(&self) -> MetricType;
fn serialze_into_output(&self) -> anyhow::Result<String>;
}
#[derive(Serialize, Debug)]
struct FullMetrics {
hostname : String,
os : String,
kernel : String,
cpu : Cpu,
ram : Ram,
disks : Disks,
networks : Ifaces,
processes : MetricProcesses,
hostname: String,
os: String,
kernel: String,
cpu: Cpu,
ram: Ram,
disks: Disks,
networks: Ifaces,
pub processes: Vec<ProcessesGeneral>,
}
impl FullMetrics {
fn create(
host: HostInfo,
cpu : Cpu,
ram : Ram,
disks : Disks,
ifaces : Ifaces,
processes : MetricProcesses,
host: HostInfo,
cpu: Cpu,
ram: Ram,
disks: Disks,
ifaces: Ifaces,
processes: Vec<ProcessesGeneral>,
) -> Self {
Self {
hostname : host.hostname,
os : host.os,
kernel : host.kernel,
hostname: host.hostname,
os: host.os,
kernel: host.kernel,
cpu,
ram,
ram,
disks,
networks : ifaces,
processes
networks: ifaces,
processes,
}
}
}
impl MetricsExportable for FullMetrics {
fn get_metric_type(&self) -> MetricType {
MetricType::FullMetrics
}
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
@ -247,15 +375,29 @@ impl MetricsExportable for FullMetrics {
#[derive(Debug, Serialize)]
struct HostInfo {
hostname : String,
os : String,
kernel : String,
hostname: String,
os: String,
kernel: String,
}
impl MetricsExportable for HostInfo {
fn get_metric_type(&self) -> MetricType {
MetricType::HostInfo
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
/// Condensed host overview: machine identity plus one headline figure
/// per resource (CPU, RAM, disk, network).
#[derive(Debug, Serialize)]
struct HostGeneral {
    hostname: String,
    os: String,
    kernel: String,
    // Global CPU load in percent.
    cpu_percentage: f32,
    // Available RAM — presumably in bytes as reported by the metrics
    // grabber; TODO confirm at the producer.
    ram_available: u64,
    // Disk usage figure; exact unit/meaning not visible here — confirm
    // at the producer.
    disk_percentage: u64,
    // Aggregate network statistic; semantics not visible here — confirm
    // at the producer.
    net_stat: u64,
}
impl MetricsExportable for HostGeneral {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
@ -263,41 +405,34 @@ impl MetricsExportable for HostInfo {
#[derive(Serialize, Debug)]
struct Cpu {
global_usage : f32,
usage : CoreUsage,
global_usage: f32,
usage: CoreUsage,
}
impl MetricsExportable for Cpu {
fn get_metric_type(&self) -> MetricType {
MetricType::Cpu
}
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
#[derive(Serialize, Debug)]
struct CoreInfo {
name: String,
brand : String,
frequency : u64,
vendor_id : String,
usage : f32,
name: String,
brand: String,
frequency: u64,
vendor_id: String,
usage: f32,
}
#[derive(Serialize, Debug)]
struct Ram {
free_mem : u64,
free_swap : u64,
total_mem : u64,
total_swap : u64
free_mem: u64,
free_swap: u64,
total_mem: u64,
total_swap: u64,
}
impl MetricsExportable for Ram{
fn get_metric_type(&self) -> MetricType {
MetricType::Ram
}
impl MetricsExportable for Ram {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
@ -305,20 +440,17 @@ impl MetricsExportable for Ram{
#[derive(Serialize, Debug)]
struct Disk {
name : String,
kind : String,
fs : String,
mount_point : String,
total_space : u64,
available_space : u64,
is_removable : bool,
is_readonly : bool,
name: String,
kind: String,
fs: String,
mount_point: String,
total_space: u64,
available_space: u64,
is_removable: bool,
is_readonly: bool,
}
impl MetricsExportable for Disks{
fn get_metric_type(&self) -> MetricType {
MetricType::Disks
}
impl MetricsExportable for Disks {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
@ -327,59 +459,53 @@ impl MetricsExportable for Disks{
// vec<Network>
#[derive(Serialize, Debug)]
struct Network {
iname : String,
mac : String,
recieved : u64,
transmitted : u64,
total_recieved_bytes : u64,
total_transmitted_bytes : u64,
total_recieved_packets : u64,
total_transmitted_packets : u64,
errors_on_recieved : u64,
errors_on_transmitted : u64,
iname: String,
mac: String,
ip_addresses: Vec<String>,
recieved: u64,
transmitted: u64,
total_recieved_bytes: u64,
total_transmitted_bytes: u64,
total_recieved_packets: u64,
total_transmitted_packets: u64,
errors_on_recieved: u64,
errors_on_transmitted: u64,
}
impl MetricsExportable for Ifaces {
    /// Network-interface payloads belong to the `Ifaces` metric family.
    fn get_metric_type(&self) -> MetricType {
        MetricType::Ifaces
    }

    /// Render the interface snapshot as pretty-printed JSON.
    fn serialze_into_output(&self) -> anyhow::Result<String> {
        let rendered = serde_json::to_string_pretty(self)?;
        Ok(rendered)
    }
}
#[derive(Serialize, Debug)]
pub struct ProcessExtended {
name : String,
status : String,
pid : Pid,
dependencies : Dependencies,
cpu_usage : f32,
ram_usage : f32,
virtual_mem_usage : u64,
name: String,
status: String,
pid: Pid,
dependencies: Dependencies,
cpu_usage: f32,
ram_usage: f32,
virtual_mem_usage: u64,
disks_usage_read_bytes: u64,
disks_usage_write_bytes: u64,
}
impl ProcessExtended {
pub fn from_old_with_params(
old : Arc<TrackingProcess>,
pid : Pid,
status : ProcessState,
) -> Self {
pub fn from_old_with_params(old: Arc<TrackingProcess>, pid: Pid, status: ProcessState) -> Self {
Self {
name : old.name.clone(),
status : status.to_string(),
name: old.name.clone(),
status: status.to_string(),
pid,
dependencies : old.dependencies.clone(),
cpu_usage : 0.0,
ram_usage : 0.0,
virtual_mem_usage : 0,
dependencies: old.dependencies.clone(),
cpu_usage: 0.0,
ram_usage: 0.0,
virtual_mem_usage: 0,
disks_usage_read_bytes: 0,
disks_usage_write_bytes: 0,
}
}
fn add_metrics(&mut self, system : &mut System) {
fn add_metrics(&mut self, system: &mut System) {
if let Some(prc) = system.process(self.pid.new_sysinfo_pid()) {
self.cpu_usage = prc.cpu_usage() / system.cpus().len() as f32;
self.ram_usage = (system.total_memory() as f32) / (prc.memory() as f32);
@ -391,9 +517,6 @@ impl ProcessExtended {
}
impl MetricsExportable for MetricProcesses {
fn get_metric_type(&self) -> MetricType {
MetricType::Processes
}
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}

View File

@ -1,29 +1,30 @@
use crate::options::structs::{Events, NegativeOutcomes, ProcessState, ProcessUnit};
use async_trait::async_trait;
use log::{error, warn};
use serde::Serialize;
use std::collections::HashSet;
use std::process::{Command, Output};
use std::sync::Arc;
use tokio::time::Duration;
use crate::options::structs::{ProcessState, Events, NegativeOutcomes, ProcessUnit};
use std::collections::HashSet;
use tokio::sync::mpsc::Receiver as MpscReciever;
use async_trait::async_trait;
use serde::Serialize;
use tokio::time::Duration;
pub mod v2 {
use log::info;
use tokio::time::sleep;
use crate::options::structs::DependencyType;
use crate::utils::metrics::processes::{ProcessesAll, ProcessesGeneral, ProcessesQuery};
use log::info;
use std::path::Path;
use tokio::time::sleep;
use super::*;
#[derive(Debug, Serialize, Clone, Copy)]
pub struct Pid(u32);
pub struct Pid(u32);
impl std::fmt::Display for Pid {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
return write!(f, "{}", self.0);
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
return write!(f, "{}", self.0);
}
}
}
impl Pid {
fn new() -> Self {
@ -34,13 +35,12 @@ pub mod v2 {
sysinfo::Pid::from_u32(self.0 as u32)
}
}
#[derive(Debug)]
pub struct ProcessesController {
pub name: Arc<str>,
pub pid : Pid,
pid: Pid,
bin: String,
// obj: Arc<TrackingProcess>,
state: ProcessState,
event_reader: MpscReciever<Events>,
negative_events: HashSet<Arc<str>>,
@ -56,12 +56,12 @@ pub mod v2 {
#[inline(always)]
pub fn new(name: &str, event_reader: MpscReciever<Events>) -> ProcessesController {
ProcessesController {
name : Arc::from(name),
pid : Pid::new(),
bin : String::new(),
state : ProcessState::Stopped,
name: Arc::from(name),
pid: Pid::new(),
bin: String::new(),
state: ProcessState::Stopped,
event_reader,
negative_events : HashSet::new(),
negative_events: HashSet::new(),
}
}
#[inline(always)]
@ -69,85 +69,106 @@ pub mod v2 {
self.bin = bin.as_ref().to_string_lossy().into_owned();
self
}
// Read-only accessors used by the metrics/CLI side of the controller.
#[allow(unused)]
/// Last OS pid recorded for the managed process.
/// NOTE(review): on stop the field is reset via `Pid::new()`; the exact
/// placeholder value is not visible here — confirm before relying on it.
pub fn get_pid(&self) -> Pid {
    self.pid
}
/// Current lifecycle state of the managed process.
pub fn get_state(&self) -> ProcessState {
    self.state
}
async fn trigger_on(&mut self, dep_name: &str, trigger: &str, dep_type: DependencyType) {
match trigger {
"stay" => {
info!("Event on {} `{}` for {}. Ignoring ...", dep_type, dep_name, self.name);
},
info!(
"Event on {} `{}` for {}. Ignoring ...",
dep_type, dep_name, self.name
);
}
"stop" => {
if is_active(&self.name).await {
info!("Event on {} `{}` for {}. Stopping ...", dep_type, dep_name, self.name);
info!(
"Event on {} `{}` for {}. Stopping ...",
dep_type, dep_name, self.name
);
match terminate_process(&self.name).await {
Ok(_) => {
info!("Process {} was stopped ...", &self.name);
self.state = ProcessState::Stopped;
self.pid = Pid::new();
},
}
Err(er) => {
error!("Cannot stop process {} : {}", self.name, er);
},
}
}
}
},
}
"user-stop" => {
if is_active(&self.name).await {
info!("Event on {} `{}` for {}. Stopping ...", dep_type, "User Stop Call", self.name);
info!(
"Event on {} `{}` for {}. Stopping ...",
dep_type, "User Stop Call", self.name
);
match terminate_process(&self.name).await {
Ok(_) => {
info!("Process {} was forcefully stopped ...", &self.name);
self.state = ProcessState::StoppedByCli;
self.pid = Pid::new();
},
}
Err(er) => {
error!("Cannot forcefully stop process {} : {}", self.name, er);
},
}
}
}
},
}
"user-hold" => {
if is_active(&self.name).await {
info!("Event on {} `{}` for {}. Stopping ...", dep_type, "User Hold Call", self.name);
info!(
"Event on {} `{}` for {}. Stopping ...",
dep_type, "User Hold Call", self.name
);
match freeze_process(&self.name).await {
Ok(_) => {
info!("Process {} was forcefully frozen ...", &self.name);
self.state = ProcessState::HoldingByCli;
// self.pid = Pid::new();
},
}
Err(er) => {
error!("Cannot forcefully freeze process {} : {}", self.name, er);
},
}
}
}
},
}
"hold" => {
if !is_frozen(&self.name).await {
info!("Event on {} `{}` for {}. Freezing ...", dep_type, dep_name, self.name);
info!(
"Event on {} `{}` for {}. Freezing ...",
dep_type, dep_name, self.name
);
match freeze_process(&self.name).await {
Ok(_) => {
info!("Process {} was frozen ...", &self.name);
self.state = ProcessState::Holding;
},
}
Err(er) => {
error!("Cannot freeze process {} : {}", self.name, er);
},
}
}
}
},
}
"restart" => {
info!("Event on {} `{}` for {}. Restarting ...", dep_type, dep_name, self.name);
info!(
"Event on {} `{}` for {}. Restarting ...",
dep_type, dep_name, self.name
);
let pid = restart_process(&self.name, &self.bin).await;
sleep(Duration::from_millis(100)).await;
if let Ok(pid) = pid {
self.pid = Pid(pid);
info!("{}: New PID - {}", self.name, self.pid);
}
},
_ => error!("Impermissible trigger in file-trigger for {}. Ignoring event ...", self.name),
}
_ => error!(
"Impermissible trigger in file-trigger for {}. Ignoring event ...",
self.name
),
}
tokio::time::sleep(Duration::from_micros(100)).await;
}
@ -173,12 +194,12 @@ pub mod v2 {
warn!("Process {} was started by user call ...", self.name);
self.state = ProcessState::Pending;
self.pid = Pid(pid);
return Ok(())
return Ok(());
} else {
warn!("Attempt to start process {} by user call was stopped due to existance of negative incidents ...", self.name);
return Err(anyhow::Error::msg(
format!("Attempt to start process {} by user call was stopped due to existance of negative incidents ...", self.name)
))
));
}
}
#[allow(unused)]
@ -192,7 +213,7 @@ pub mod v2 {
warn!("Attempt to unfreeze process {} by user call was stopped due to existance of negative incidents ...", self.name);
return Err(anyhow::Error::msg(
format!("Attempt to unfreeze process {} by user call was stopped due to existance of negative incidents ...", self.name)
))
));
}
}
#[allow(unused)]
@ -202,6 +223,14 @@ pub mod v2 {
self.pid = Pid(pid);
Ok(())
}
/// Build the serializable "general" snapshot (name, state, pid) of this
/// controller for process queries.
/// NOTE(review): the body contains no `.await`; `async` here only
/// shapes the caller-facing interface. Removing it would break callers,
/// so it stays.
pub async fn get_general_info(&self) -> ProcessesGeneral {
    ProcessesGeneral {
        // `name` is an `Arc<str>`; produce an owned String for the snapshot.
        name: self.name.to_string(),
        state: self.state,
        pid: self.pid,
    }
}
}
#[async_trait]
@ -209,54 +238,66 @@ pub mod v2 {
async fn process(&mut self) {
if self.negative_events.len() == 0 {
let conditions = (is_active(&self.name).await, is_frozen(&self.name).await);
let state = &self.state;
let state = &self.state;
match (state, conditions) {
(ProcessState::Holding, (_, _)) => {
info!("No negative dependecies events on {} frozen process. Unfreezing ...", self.name);
info!(
"No negative dependecies events on {} frozen process. Unfreezing ...",
self.name
);
if let Err(er) = unfreeze_process(&self.name).await {
error!("Cannot unfreeze process {} : {}", self.name, er);
} else {
self.state = ProcessState::Pending;
info!("Process {} was unfreezed", &self.name);
}
},
}
(ProcessState::Stopped, (_, _)) => {
info!("No negative dependecies events on stopped {} process. Starting ...", self.name);
info!(
"No negative dependecies events on stopped {} process. Starting ...",
self.name
);
match start_process(&self.name, &self.bin).await {
Ok(pid) => {
self.state = ProcessState::Pending;
self.pid = Pid(pid);
info!("{}: New PID - {}", self.name, self.pid);
},
}
Err(er) => {
error!("Cannot start process {} : {}", self.name, er);
},
}
}
},
}
(ProcessState::Pending, (false, false)) => {
info!("{} process was impermissibly stopped. Starting ...", self.name);
info!(
"{} process was impermissibly stopped. Starting ...",
self.name
);
match start_process(&self.name, &self.bin).await {
Ok(pid) => {
self.state = ProcessState::Pending;
self.pid = Pid(pid);
info!("{}: New PID - {}", self.name, self.pid);
},
}
Err(er) => {
error!("Cannot start process {} : {}", self.name, er);
},
}
}
},
}
(ProcessState::Pending, (true, true)) => {
info!("No negative dependecies events on {} process. Unfreezing ...", self.name);
info!(
"No negative dependecies events on {} process. Unfreezing ...",
self.name
);
if let Err(er) = unfreeze_process(&self.name).await {
error!("Cannot unfreeze process {} : {}", self.name, er);
} else {
self.state = ProcessState::Pending;
info!("Process {} was unfreezed", &self.name);
}
},
_ => {},
}
}
_ => {}
}
}
while let Ok(event) = self.event_reader.try_recv() {
match event {
@ -264,22 +305,16 @@ pub mod v2 {
if self.negative_events.contains(&target) {
self.negative_events.remove(&target);
}
},
Events::Negative(event) => {
match event {
NegativeOutcomes::FileWasChanged(target, dep_type, trigger) |
NegativeOutcomes::FileWasMovedOrDeleted(target, dep_type, trigger) |
NegativeOutcomes::ServiceIsUnreachable(target, dep_type, trigger) => {
if !self.negative_events.contains(&target) {
self.negative_events.insert(target.clone());
self.trigger_on(
&target,
&trigger,
dep_type
).await;
}
},
}
Events::Negative(event) => match event {
NegativeOutcomes::FileWasChanged(target, dep_type, trigger)
| NegativeOutcomes::FileWasMovedOrDeleted(target, dep_type, trigger)
| NegativeOutcomes::ServiceIsUnreachable(target, dep_type, trigger) => {
if !self.negative_events.contains(&target) {
self.negative_events.insert(target.clone());
self.trigger_on(&target, &trigger, dep_type).await;
}
}
},
}
@ -290,7 +325,7 @@ pub mod v2 {
/// # Fn `get_pid`
/// ## for initializing process of unstoppable grubbing metrics.
///
///
/// *input* : `&str`
///
/// *output* : `None` if cant get process PID | `Some(Output)` on success
@ -299,8 +334,8 @@ pub mod v2 {
///
/// *managing* : process name
///
/// *depends on* : -
///
/// *depends on* : -
///
pub async fn get_pid(name: &str) -> Option<Output> {
let name = Arc::new(name.to_string());
let res =
@ -319,7 +354,7 @@ pub async fn get_pid(name: &str) -> Option<Output> {
/// # Fn `is_active`
/// ## for checking process's activity state
///
///
/// *input* : `&str`
///
/// *output* : `true` if process running | `false` if not
@ -328,8 +363,8 @@ pub async fn get_pid(name: &str) -> Option<Output> {
///
/// *managing* : process name
///
/// *depends on* : -
///
/// *depends on* : -
///
pub async fn is_active(name: &str) -> bool {
let arc_name = Arc::new(name.to_string());
tokio::task::spawn_blocking(move || {
@ -348,7 +383,7 @@ pub async fn is_active(name: &str) -> bool {
/// # Fn `is_frozen`
/// ## for checking process's hibernation state
///
///
/// *input* : `&str`
///
/// *output* : `true` if process is frozen | `false` if not
@ -358,7 +393,7 @@ pub async fn is_active(name: &str) -> bool {
/// *managing* : process name
///
/// *depends on* : fn `get_pid`
///
///
pub async fn is_frozen(name: &str) -> bool {
let temp: Output;
if let Some(output) = get_pid(name).await {
@ -389,7 +424,7 @@ pub async fn is_frozen(name: &str) -> bool {
/// # Fn `terminate_process`
/// ## for stop current process
///
///
/// *input* : `&str`
///
/// *output* : ()
@ -399,20 +434,21 @@ pub async fn is_frozen(name: &str) -> bool {
/// *managing* : process name
///
/// *depends on* : -
///
///
pub async fn terminate_process(name: &str) -> anyhow::Result<()> {
if !is_active(name).await {
return Err(anyhow::Error::msg(format!("Process {} is already stopped", name)))
}
let _ = Command::new("pkill")
.arg(name)
.output()?;
return Err(anyhow::Error::msg(format!(
"Process {} is already stopped",
name
)));
}
let _ = Command::new("pkill").arg(name).output()?;
Ok(())
}
/// # Fn `terminate_process`
/// ## for freeze/hibernate current process
///
///
/// *input* : `&str`
///
/// *output* : ()
@ -422,17 +458,15 @@ pub async fn terminate_process(name: &str) -> anyhow::Result<()> {
/// *managing* : process name
///
/// *depends on* : -
///
///
pub async fn freeze_process(name: &str) -> anyhow::Result<()> {
let _ = Command::new("pkill")
.args(["-STOP", name])
.output()?;
let _ = Command::new("pkill").args(["-STOP", name]).output()?;
Ok(())
}
/// # Fn `unfreeze_process`
/// ## for unfreeze/hibernate current process
///
///
/// *input* : `&str`
///
/// *output* : ()
@ -442,17 +476,15 @@ pub async fn freeze_process(name: &str) -> anyhow::Result<()> {
/// *managing* : process name
///
/// *depends on* : -
///
///
pub async fn unfreeze_process(name: &str) -> anyhow::Result<()> {
let _ = Command::new("pkill")
.args(["-CONT", name])
.output()?;
let _ = Command::new("pkill").args(["-CONT", name]).output()?;
Ok(())
}
/// # Fn `restart_process`
/// ## for restarting current process
///
///
/// *input* : `&str`, &str
///
/// *output* : ()
@ -462,7 +494,7 @@ pub async fn unfreeze_process(name: &str) -> anyhow::Result<()> {
/// *managing* : process name and path to its exec file
///
/// *depends on* : fn `start_process`, fn `terminate_process`
///
///
pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<u32> {
terminate_process(name).await?;
tokio::time::sleep(Duration::from_millis(100)).await;
@ -471,7 +503,7 @@ pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<u32> {
/// # Fn `start_process`
/// ## for starting current process
///
///
/// *input* : `&str`, &str
///
/// *output* : ()
@ -481,11 +513,14 @@ pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<u32> {
/// *managing* : process name and path to its exec file
///
/// *depends on* : -
///
///
pub async fn start_process(name: &str, path: &str) -> anyhow::Result<u32> {
if is_active(name).await {
return Err(anyhow::Error::msg(format!("Process {} is already running", name)))
}
return Err(anyhow::Error::msg(format!(
"Process {} is already running",
name
)));
}
let mut command = Command::new(path);
// command.arg(path);
@ -495,9 +530,10 @@ pub async fn start_process(name: &str, path: &str) -> anyhow::Result<u32> {
warn!("Process {} is running now!", name);
Ok(pid)
}
Err(er) => {
Err(anyhow::Error::msg(format!("Cannot start process {} : {}", name, er)))
}
Err(er) => Err(anyhow::Error::msg(format!(
"Cannot start process {} : {}",
name, er
))),
}
}
@ -516,8 +552,7 @@ mod process_unittests {
// let _ = std::io::stdout().write_all(b"");
let res1 = start_process("restart-prc", "./tests/examples/restart-prc").await;
assert!(res1.is_ok());
let res2 =
restart_process("restart-prc", "./tests/examples/restart-prc").await;
let res2 = restart_process("restart-prc", "./tests/examples/restart-prc").await;
assert!(res2.is_ok());
let _ = terminate_process("restart-prc").await;
let res3 = is_active("restart-prc").await;

View File

@ -1,20 +1,20 @@
use async_trait::async_trait;
use futures::future::Future;
use log::{error, warn};
use std::net::ToSocketAddrs;
use std::sync::Arc;
use tokio::time::Duration;
use tokio::sync::mpsc::Sender as Sender;
use async_trait::async_trait;
use std::pin::Pin;
use futures::future::Future;
use std::sync::Arc;
use tokio::sync::mpsc::Sender;
use tokio::time::Duration;
pub mod v2 {
use futures::FutureExt;
use log::info;
use crate::options::structs::{Triggers, ProcessUnit, Events, ServiceState};
use crate::options::structs::{Events, ProcessUnit, ServiceState, Triggers};
use super::*;
use std::collections::{HashMap, BTreeMap, VecDeque};
use std::collections::{BTreeMap, HashMap, VecDeque};
type MpscSender = Arc<Sender<Events>>;
// type EventHandlers<'a> = Vec<MpscSender<Events<'a>>>;
@ -26,20 +26,20 @@ pub mod v2 {
pub struct ServicesController {
// i.e. yandex.ru
#[allow(unused)]
name : String,
name: String,
// i.e. yandex.ru:443
access_url : Arc<str>,
access_url: Arc<str>,
// "OK" or "Unavailable"
state: ServiceState,
// btree map with key as max wait time and it's key to hashmap
config: ConnectionQueue,
// Map of processes with their (trigger and mpsc sender)
event_registrator : EventHandlers,
event_registrator: EventHandlers,
}
impl PartialEq for ServicesController {
fn eq(&self, other: &Self) -> bool {
self.access_url == other.access_url
self.access_url == other.access_url
}
}
@ -47,26 +47,22 @@ pub mod v2 {
#[inline(always)]
pub fn new() -> ServicesController {
ServicesController {
name : String::new(),
access_url : Arc::from(String::new()),
state : ServiceState::Unavailable,
name: String::new(),
access_url: Arc::from(String::new()),
state: ServiceState::Unavailable,
config: ConnectionQueue::new(),
event_registrator : EventHandlers::new(),
event_registrator: EventHandlers::new(),
}
}
#[inline(always)]
pub fn with_access_name(
mut self,
hostname: &str,
access_url: &str,
) -> ServicesController {
pub fn with_access_name(mut self, hostname: &str, access_url: &str) -> ServicesController {
self.name = hostname.to_string();
self.access_url = Arc::from(access_url);
self
}
#[inline(always)]
pub fn with_params(
mut self,
mut self,
conn_queue: ConnectionQueue,
event_reg: EventHandlers,
) -> ServicesController {
@ -76,49 +72,61 @@ pub mod v2 {
}
pub fn get_access_url(hostname: &str, port: Option<&u32>) -> String {
format!("{}{}", hostname, port.map_or_else(|| "".to_string(), |p| format!(":{}", p)))
format!(
"{}{}",
hostname,
port.map_or_else(|| "".to_string(), |p| format!(":{}", p))
)
}
pub fn add_process(
&mut self,
proc_name: &str,
trigger: Triggers,
sender: MpscSender,
) {
/// Last observed reachability state of the service
/// (`ServiceState::Ok` or `ServiceState::Unavailable`).
pub fn get_state(&self) -> ServiceState {
    self.state
}
pub fn add_process(&mut self, proc_name: &str, trigger: Triggers, sender: MpscSender) {
let proc_name: Arc<str> = Arc::from(proc_name);
// queue add
if let Triggers::Service { wait, .. } = trigger {
self.config.entry(wait)
.and_modify(|el| el.push_back(proc_name.clone()))
.or_insert({
let mut temp = VecDeque::new();
temp.push_back(proc_name.clone());
temp
});
self.config
.entry(wait)
.and_modify(|el| el.push_back(proc_name.clone()))
.or_insert({
let mut temp = VecDeque::new();
temp.push_back(proc_name.clone());
temp
});
}
// event add
self.event_registrator.entry(proc_name).or_insert((trigger, sender));
self.event_registrator
.entry(proc_name)
.or_insert((trigger, sender));
}
async fn check_state(&self) -> anyhow::Result<()> {
let url = self.access_url.clone();
let resolve_future = tokio::task::spawn_blocking(move || {
url.to_socket_addrs()
});
let addrs: Vec<_> = match tokio::time::timeout(Duration::from_secs(1), resolve_future).await {
Ok(Ok(addrs)) => addrs?.collect(),
Ok(Err(er)) => return Err(er.into()),
Err(_) => return Err(anyhow::Error::msg("DNS resolution timeout")),
};
let resolve_future = tokio::task::spawn_blocking(move || url.to_socket_addrs());
let addrs: Vec<_> =
match tokio::time::timeout(Duration::from_secs(1), resolve_future).await {
Ok(Ok(addrs)) => addrs?.collect(),
Ok(Err(er)) => return Err(er.into()),
Err(_) => return Err(anyhow::Error::msg("DNS resolution timeout")),
};
if addrs.is_empty() {
return Err(anyhow::Error::msg("No addresses resolved"));
}
let tasks: Vec<_> = addrs.into_iter().map(|addr| async move {
match tokio::time::timeout(Duration::from_secs(2), tokio::net::TcpStream::connect(&addr)).await {
Ok(Ok(_)) => Some(addr),
_ => None,
}
}).collect();
let tasks: Vec<_> = addrs
.into_iter()
.map(|addr| async move {
match tokio::time::timeout(
Duration::from_secs(2),
tokio::net::TcpStream::connect(&addr),
)
.await
{
Ok(Ok(_)) => Some(addr),
_ => None,
}
})
.collect();
let mut any_success = false;
for task in futures::future::join_all(tasks).await {
if task.is_some() {
@ -127,7 +135,10 @@ pub mod v2 {
}
}
if !any_success {
return Err(anyhow::Error::msg(format!("No access to service `{}`", &self.access_url)));
return Err(anyhow::Error::msg(format!(
"No access to service `{}`",
&self.access_url
)));
}
Ok(())
@ -135,7 +146,9 @@ pub mod v2 {
async fn trigger_on(&mut self) {
match self.state {
ServiceState::Ok => {
let futures : Vec<Pin<Box<dyn Future<Output = ()> + Send>>> = self.event_registrator.iter()
let futures: Vec<Pin<Box<dyn Future<Output = ()> + Send>>> = self
.event_registrator
.iter()
.map(|(prc, (_, sender_opt))| (prc, (self.access_url.clone(), sender_opt)))
.map(|(prc, (serv, sender_opt))| async move {
info!("Notifying process {} ...", prc);
@ -144,73 +157,100 @@ pub mod v2 {
.map(|fut| fut.boxed())
.collect();
futures::future::join_all(futures).await;
},
futures::future::join_all(futures).await;
}
ServiceState::Unavailable => {
// looped check and notifying
self.looped_check().await;
},
}
}
}
async fn looped_check(self: &mut Self) {
let longest = self.config.last_entry().unwrap();
let longest = longest.key();
let mut interapter = tokio::time::interval(tokio::time::Duration::from_secs(1));
let timer = tokio::time::Instant::now();
let timer = tokio::time::Instant::now();
let mut attempt: u32 = 1;
let access_url = Arc::new(self.access_url.clone());
if let Err(_) = tokio::time::timeout(tokio::time::Duration::from_secs((longest + 1) as u64), async {
// let access_url = access_url.clone();
loop {
interapter.tick().await;
info!("Trying to connect to {} (attempt: {}) ...", &access_url, attempt);
attempt += 1;
if let Err(_) = tokio::time::timeout(
tokio::time::Duration::from_secs((longest + 1) as u64),
async {
// let access_url = access_url.clone();
loop {
interapter.tick().await;
info!(
"Trying to connect to {} (attempt: {}) ...",
&access_url, attempt
);
attempt += 1;
let state_check_result = self.check_state().await;
let state_check_result = self.check_state().await;
if state_check_result.is_ok() {
info!("Connection to {} is `OK` now", &access_url);
self.state = ServiceState::Ok;
let futures : Vec<Pin<Box<dyn Future<Output = ()> + Send>>> = self.event_registrator.iter()
.map(|(prc, (_, sender_opt))| (prc, (self.access_url.clone(), sender_opt)))
.map(|(prc, (serv, sender_opt))| async move {
info!("Notifying process {} ...", prc);
let _ = sender_opt.send(Events::Positive(serv.clone()));
})
.map(|fut| fut.boxed())
.collect();
if state_check_result.is_ok() {
info!("Connection to {} is `OK` now", &access_url);
self.state = ServiceState::Ok;
let futures: Vec<Pin<Box<dyn Future<Output = ()> + Send>>> = self
.event_registrator
.iter()
.map(|(prc, (_, sender_opt))| {
(prc, (self.access_url.clone(), sender_opt))
})
.map(|(prc, (serv, sender_opt))| async move {
info!("Notifying process {} ...", prc);
let _ = sender_opt.send(Events::Positive(serv.clone()));
})
.map(|fut| fut.boxed())
.collect();
futures::future::join_all(futures).await;
break;
} else {
let now = timer.elapsed();
futures::future::join_all(futures).await;
break;
} else {
let now = timer.elapsed();
let iterator = self.config.iter()
.filter(|(&wait, _)| tokio::time::Duration::from_secs(wait as u64) <= now)
.flat_map(|(_, a)| a.iter().cloned())
.collect::<VecDeque<Arc<str>>>();
let iterator = self
.config
.iter()
.filter(|(&wait, _)| {
tokio::time::Duration::from_secs(wait as u64) <= now
})
.flat_map(|(_, a)| a.iter().cloned())
.collect::<VecDeque<Arc<str>>>();
for name in iterator {
let proc_name = name.to_string();
info!("Trying to notify process `{}` ...", &proc_name);
let sender_opt = self.event_registrator.get(&name)
.map(|(trigger, sender)|
(trigger.to_service_negative_event(self.access_url.clone()), sender)
);
for name in iterator {
let proc_name = name.to_string();
info!("Trying to notify process `{}` ...", &proc_name);
let sender_opt =
self.event_registrator.get(&name).map(|(trigger, sender)| {
(
trigger
.to_service_negative_event(self.access_url.clone()),
sender,
)
});
if let Some((tr, tx)) = sender_opt {
let _ = tx.send(tr.unwrap()).await;
} else {
error!("Cannot find {} channel sender in {} service", name.clone(), &self.access_url)
if let Some((tr, tx)) = sender_opt {
let _ = tx.send(tr.unwrap()).await;
} else {
error!(
"Cannot find {} channel sender in {} service",
name.clone(),
&self.access_url
)
}
}
}
}
}
}).await {
},
)
.await
{
info!("Timeout of establishing connection to {}. ", &access_url);
}
}
/// Cheap handle (Arc refcount bump, no string copy) to the service's
/// access URL, e.g. "yandex.ru:443".
pub fn get_arc_access_url(&self) -> Arc<str> {
    self.access_url.clone()
}
}
#[async_trait]
impl ProcessUnit for ServicesController {
@ -222,14 +262,16 @@ pub mod v2 {
warn!("Connection with `{}` service was established. Notifying {} process(es) ...", &self.access_url, &self.event_registrator.len());
self.state = ServiceState::Ok;
self.trigger_on().await;
},
}
(ServiceState::Ok, Err(_)) => {
warn!("Unreachable for connection service `{}`. Initializing reconnect mechanism ...", &self.access_url);
self.state = ServiceState::Unavailable;
self.trigger_on().await;
},
(ServiceState::Unavailable, Err(_)) => warn!("Service {} is still unreachable", &self.access_url),
_ => { /* DEAD END WITH NO INTEREST */ },
self.trigger_on().await;
}
(ServiceState::Unavailable, Err(_)) => {
warn!("Service {} is still unreachable", &self.access_url)
}
_ => { /* DEAD END WITH NO INTEREST */ }
}
}
}