From e9b6abefdf2f0bd0d1303c5732e3d7ed0b851e53 Mon Sep 17 00:00:00 2001 From: prplV Date: Mon, 3 Feb 2025 12:12:18 +0300 Subject: [PATCH 01/44] preboot changed + config setting up --- noxis-rs/src/main.rs | 134 ++++++++++++++++---------------- noxis-rs/src/options/config.rs | 48 ++++++++++-- noxis-rs/src/options/preboot.rs | 57 +++++++------- noxis-rs/src/options/structs.rs | 2 + 4 files changed, 139 insertions(+), 102 deletions(-) diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index 29886c8..f29d2c0 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -25,73 +25,73 @@ async fn main() -> anyhow::Result<()>{ // setting up redis connection \ // then conf checks to choose the most actual \ - let processes: Processes = get_actual_config(preboot.clone()).await.unwrap_or_else(|| { - error!("No actual configuration for runner. Stopping..."); - std::process::exit(1); - }); - - info!( - "Current runner configuration: {}", - &processes.date_of_creation - ); - info!("Runner is ready. Initializing..."); - - if processes.processes.is_empty() { - error!("Processes list is null, runner-rs initialization is stopped"); - return Err(Error::msg("Empty processes segment in config")); - } - let mut handler: Vec> = vec![]; - // is in need to send to the signals handler thread - let mut senders: Vec>> = vec![]; - - for proc in processes.processes.iter() { - info!( - "Process '{}' on stage: {}. 
Depends on {} file(s), {} service(s)", - proc.name, - proc.path, - proc.dependencies.files.len(), - proc.dependencies.services.len() - ); - - // creating msg channel - // can or should be executed in new thread - let (tx, mut rx) = mpsc::channel::(1); - let proc = Arc::new(proc.clone()); - let tx = Arc::new(tx.clone()); - - senders.push(Arc::clone(&tx.clone())); - - let event = tokio::spawn(async move { - run_daemons(proc.clone(), tx.clone(), &mut rx).await; - }); - handler.push(event); - } - - // destructor addition - handler.push(tokio::spawn(async move { - if set_valid_destructor(Arc::new(senders)).await.is_err() { - error!("Linux signals handler creation failed. Terminating main thread..."); - return; - } - - tokio::time::sleep(Duration::from_millis(200)).await; - info!("End of job. Terminating main thread..."); - std::process::exit(0); - })); - - // remote config update subscription - handler.push(tokio::spawn(async move { - let _ = subscribe_config_stream(Arc::new(processes), preboot.clone()).await; - })); - - // cli pipeline - handler.push(tokio::spawn(async move { - let _ = init_cli_pipeline().await; - })); - - for i in handler { - let _ = i.await; - } + // let processes: Processes = get_actual_config(preboot.clone()).await.unwrap_or_else(|| { + // error!("No actual configuration for runner. Stopping..."); + // std::process::exit(1); + // }); + // + // info!( + // "Current runner configuration: {}", + // &processes.date_of_creation + // ); + // info!("Runner is ready. Initializing..."); + // + // if processes.processes.is_empty() { + // error!("Processes list is null, runner-rs initialization is stopped"); + // return Err(Error::msg("Empty processes segment in config")); + // } + // let mut handler: Vec> = vec![]; + // // is in need to send to the signals handler thread + // let mut senders: Vec>> = vec![]; + // + // for proc in processes.processes.iter() { + // info!( + // "Process '{}' on stage: {}. 
Depends on {} file(s), {} service(s)", + // proc.name, + // proc.path, + // proc.dependencies.files.len(), + // proc.dependencies.services.len() + // ); + // + // // creating msg channel + // // can or should be executed in new thread + // let (tx, mut rx) = mpsc::channel::(1); + // let proc = Arc::new(proc.clone()); + // let tx = Arc::new(tx.clone()); + // + // senders.push(Arc::clone(&tx.clone())); + // + // let event = tokio::spawn(async move { + // run_daemons(proc.clone(), tx.clone(), &mut rx).await; + // }); + // handler.push(event); + // } + // + // // destructor addition + // handler.push(tokio::spawn(async move { + // if set_valid_destructor(Arc::new(senders)).await.is_err() { + // error!("Linux signals handler creation failed. Terminating main thread..."); + // return; + // } + // + // tokio::time::sleep(Duration::from_millis(200)).await; + // info!("End of job. Terminating main thread..."); + // std::process::exit(0); + // })); + // + // // remote config update subscription + // handler.push(tokio::spawn(async move { + // let _ = subscribe_config_stream(Arc::new(processes), preboot.clone()).await; + // })); + // + // // cli pipeline + // handler.push(tokio::spawn(async move { + // let _ = init_cli_pipeline().await; + // })); + // + // for i in handler { + // let _ = i.await; + // } Ok(()) } diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index d9451b9..1439ae7 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -1,6 +1,6 @@ use super::structs::*; use log::{error, info, warn}; -use redis::{Client, Connection}; +use redis::{Client, Connection, PubSub}; use std::fs::OpenOptions; use std::io::Write; use std::os::unix::process::CommandExt; @@ -9,9 +9,47 @@ use std::sync::Arc; use std::{env, fs}; use super::preboot::PrebootParams; use tokio::time::{Duration, sleep}; +// use redis::PubSub; +use tokio::sync::oneshot::Receiver; // const CONFIG_PATH: &str = "settings.json"; +pub mod v2 { + use super::*; 
+ pub async fn init_config_mechanism(cli_oneshot: Arc> /*...*/) { /* local + pubsub + cli oneshot check */ } + pub async fn get_pubsub<'a>(params: Arc) -> Option> { + let config_path = params.config.to_str().unwrap_or_else(|| "settings.json"); + + if params.no_sub || params.no_sub { + return None; + } + if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { + if let Ok(mut conn) = client.get_connection() { + match crate::utils::get_container_id() { + Some(channel_name) => { + let channel_name = channel_name.trim(); + let mut pubsub = conn.as_pubsub(); + if pubsub.subscribe(&channel_name).is_ok() { + + } else { + error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name); + } + }, + None => { + error!("Cannot get channel name"); + } + } + } + } + error!("Error with subscribing Redis stream on update. Working only with selected config..."); + None + } + pub async fn get_local_config_watcher(/*...*/) { /*...*/ } + // + pub async fn cli_config_reciever(cli_oneshot: Arc>) { /*...*/ } +} + + /// # Fn `load_processes` /// ## for reading and parsing *local* storing config /// @@ -54,14 +92,14 @@ pub async fn get_actual_config(params : Arc) -> Option error!("Invalid character in config file. 
Config path was set to default"); "settings.json" }); - info!("Configurating config module with params: no-remote-config={}, no-sub={}, local config path={:?}, remote server={}", params.no_remote_config, params.no_sub, params.config, params.remote_server_url); + info!("Configurating config module with params: no-sub={}, local config path={:?}, remote server={}", params.no_sub, params.config, params.remote_server_url); match load_processes(config_path) { Some(local_conf) => { info!( "Found local configuration, version - {}", &local_conf.date_of_creation ); - if !params.no_remote_config { + if !params.no_sub { if let Some(remote_conf) = // TODO : rework with pubsub mech once_get_remote_configuration(&format!("redis://{}/", ¶ms.remote_server_url)) @@ -85,7 +123,7 @@ pub async fn get_actual_config(params : Arc) -> Option } None => { warn!("No local valid conf was found. Trying to pull remote one..."); - if !params.no_remote_config { + if !params.no_sub { let mut conn = get_connection_watcher(&open_watcher(&format!("redis://{}/", ¶ms.remote_server_url))); if let Some(conf) = get_remote_conf_watcher(&mut conn).await { info!("Config {} was pulled from Redis-Server. 
Starting...", &conf.date_of_creation); @@ -322,7 +360,7 @@ fn restart_main_thread() -> std::io::Result<()> { pub async fn subscribe_config_stream(actual_prcs: Arc, params: Arc) -> Result<(), CustomError> { let config_path = params.config.to_str().unwrap_or_else(|| "settings.json"); - if params.no_sub || params.no_remote_config { + if params.no_sub || params.no_sub { return Err(CustomError::Fatal); } if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { diff --git a/noxis-rs/src/options/preboot.rs b/noxis-rs/src/options/preboot.rs index 8293f8a..d21cd57 100644 --- a/noxis-rs/src/options/preboot.rs +++ b/noxis-rs/src/options/preboot.rs @@ -13,7 +13,7 @@ enum EnvVars { NoxisNoHagent, NoxisNoLogs, NoxisRefreshLogs, - NoxisNoRemoteConfig, + // NoxisNoRemoteConfig, NoxisNoConfigSub, NoxisSocketPath, NoxisLogTo, @@ -29,7 +29,7 @@ impl std::fmt::Display for EnvVars { EnvVars::NoxisNoHagent => write!(f, "NOXIS_NO_HAGENT"), EnvVars::NoxisNoLogs => write!(f, "NOXIS_NO_LOGS"), EnvVars::NoxisRefreshLogs => write!(f, "NOXIS_REFRESH_LOGS"), - EnvVars::NoxisNoRemoteConfig => write!(f, "NOXIS_NO_REMOTE_CONFIG"), + // EnvVars::NoxisNoRemoteConfig => write!(f, "NOXIS_NO_REMOTE_CONFIG"), EnvVars::NoxisNoConfigSub => write!(f, "NOXIS_NO_CONFIG_SUB"), EnvVars::NoxisSocketPath => write!(f, "NOXIS_SOCKET_PATH"), EnvVars::NoxisLogTo => write!(f, "NOXIS_LOG_TO"), @@ -48,7 +48,7 @@ impl<'a> EnvVars { EnvVars::NoxisNoHagent => "false", EnvVars::NoxisNoLogs => "false", EnvVars::NoxisRefreshLogs => "false", - EnvVars::NoxisNoRemoteConfig => "false", + // EnvVars::NoxisNoRemoteConfig => "false", EnvVars::NoxisNoConfigSub => "false", EnvVars::NoxisSocketPath => "/var/run/enode/hostagent.sock", EnvVars::NoxisLogTo => "./", @@ -77,7 +77,7 @@ impl<'a> EnvVars { Self::NoxisNoHagent.process_env_var(&preboot.no_hostagent.to_string()); Self::NoxisNoLogs.process_env_var(&preboot.no_logs.to_string()); Self::NoxisRefreshLogs.process_env_var(&preboot.refresh_logs.to_string()); - 
Self::NoxisNoRemoteConfig.process_env_var(&preboot.no_remote_config.to_string()); + // Self::NoxisNoRemoteConfig.process_env_var(&preboot.no_remote_config.to_string()); Self::NoxisNoConfigSub.process_env_var(&preboot.no_sub.to_string()); Self::NoxisSocketPath.process_env_var(preboot.socket_path.to_str().unwrap()); Self::NoxisLogTo.process_env_var(preboot.log_to.to_str().unwrap()); @@ -147,12 +147,6 @@ impl std::fmt::Display for MetricsPrebootParams { /// noxis-rs ... --refresh-logs ... /// ``` /// -/// `--no-remote-config` - to disable work with Redis as config producer -/// ### usage : -/// ``` bash -/// noxis-rs ... --no-remote-config ... -/// ``` -/// /// `--no-sub` - to disable Redis subscribtion mechanism /// ### usage : /// ``` bash @@ -212,17 +206,18 @@ pub struct PrebootParams { help="To clear logs directory" )] pub refresh_logs : bool, - #[arg( - long = "no-remote-config", - action, - help="To disable work with remote config server", - conflicts_with="no_sub")] - pub no_remote_config : bool, + // #[arg( + // long = "no-remote-config", + // action, + // help="To disable work with remote config server", + // conflicts_with="no_sub")] + // pub no_remote_config : bool, #[arg( long = "no-sub", action, - help="To disable subscription mechanism", - conflicts_with="no_remote_config")] + help="To disable Redis subscription mechanism", + )] + // conflicts_with="no_remote_config" pub no_sub : bool, // params (socket_path, log_to, remote_server_url, config) @@ -243,7 +238,7 @@ pub struct PrebootParams { #[arg( long = "remote-server-url", default_value="localhost", - conflicts_with="no_remote_config", + conflicts_with="no_sub", help = "To set url of remote config server using in remote config pulling mechanism" )] pub remote_server_url : String, @@ -288,15 +283,17 @@ impl PrebootParams { // existing log dir if !self.log_to.exists() && !self.no_logs { eprintln!("Error: Log-Dir not found or Noxis can't read it. 
LogDir was set to default"); + self.refresh_logs = false; self.log_to = PathBuf::from("./"); // return Err(Error::msg("Log Directory Not Found or Noxis can't read it. Cannot start")); } // existing sock file if !self.config.exists() { eprintln!("Error: Invalid character in config file. Config path was set to default"); - let config = PathBuf::from("/etc/settings.json"); - if !config.exists() && self.no_remote_config { - return Err(Error::msg("Noxis cannot run without config. Create local config or enable remote-config mechanism")); + // TODO : ??? wtf is going with 2 paths + let config = PathBuf::from("/etc/enode/noxis/settings.json"); + if !config.exists() && self.no_sub { + return Err(Error::msg("Noxis cannot run without config. Create local config or enable pubsub mechanism")); } self.config = PathBuf::from("settings.json"); // return Err(Error::msg("Local Config Not Found or Noxis can't read it. Cannot start")); @@ -353,20 +350,20 @@ mod preboot_unitests{ "runner-rs", "--no-sub", "--remote-server-url", "redis://127.0.0.1" - ]).is_ok()) - } - #[test] - fn parsing_config_invalid_args_noremote_nosub() { - assert!(PrebootParams::try_parse_from(vec![ - "runner-rs", - "--no-remote-config", "--no-sub" ]).is_err()) } + // #[test] + // fn parsing_config_invalid_args_noremote_nosub() { + // assert!(PrebootParams::try_parse_from(vec![ + // "runner-rs", + // "--no-remote-config", "--no-sub" + // ]).is_err()) + // } #[test] fn parsing_config_invalid_args_noremote_remoteurl() { assert!(PrebootParams::try_parse_from(vec![ "runner-rs", - "--no-remote-config", + "--no-sub", "--remote-server-url", "redis://127.0.0.1" ]).is_err()) } diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 65c1a19..12c6019 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -40,6 +40,8 @@ pub struct Processes { pub processes: Vec, } + + /// # Struct for the 2nd level in json conf file /// ## for each process to contain info, such as 
name, path and dependencies /// -- 2.40.1 From df03bd5346e919df580d6328c86a0eb5af91e043 Mon Sep 17 00:00:00 2001 From: prplV Date: Mon, 3 Feb 2025 13:58:01 +0300 Subject: [PATCH 02/44] config pubsub pre-dev skeleton --- noxis-rs/src/options/config.rs | 41 ++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 1439ae7..cabab29 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -18,30 +18,37 @@ pub mod v2 { use super::*; pub async fn init_config_mechanism(cli_oneshot: Arc> /*...*/) { /* local + pubsub + cli oneshot check */ } pub async fn get_pubsub<'a>(params: Arc) -> Option> { - let config_path = params.config.to_str().unwrap_or_else(|| "settings.json"); - - if params.no_sub || params.no_sub { + // let config_path = params.config.to_str().unwrap_or_else(|| "settings.json"); + if params.no_sub { return None; } - if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { - if let Ok(mut conn) = client.get_connection() { - match crate::utils::get_container_id() { - Some(channel_name) => { - let channel_name = channel_name.trim(); - let mut pubsub = conn.as_pubsub(); - if pubsub.subscribe(&channel_name).is_ok() { + let mut connection_delay: u64 = 1; + loop { + if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { + if let Ok(mut conn) = client.get_connection() { + match crate::utils::get_container_id() { + Some(channel_name) => { + // let channel_name = channel_name.trim(); + let mut pubsub = conn.as_pubsub(); + if pubsub.subscribe(&channel_name.trim()).is_ok() { - } else { - error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name); + todo!() + + } else { + error!("Cannot subscribe channel {}. 
Check Redis Server status", &channel_name); + } + }, + None => { + error!("Cannot get channel name"); } - }, - None => { - error!("Cannot get channel name"); } } } + error!("Error with subscribing Redis stream on update. Retrying in {} secs...", connection_delay); + sleep(Duration::from_secs(connection_delay)).await; + connection_delay *= 2; } - error!("Error with subscribing Redis stream on update. Working only with selected config..."); + None } pub async fn get_local_config_watcher(/*...*/) { /*...*/ } @@ -360,7 +367,7 @@ fn restart_main_thread() -> std::io::Result<()> { pub async fn subscribe_config_stream(actual_prcs: Arc, params: Arc) -> Result<(), CustomError> { let config_path = params.config.to_str().unwrap_or_else(|| "settings.json"); - if params.no_sub || params.no_sub { + if params.no_sub { return Err(CustomError::Fatal); } if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { -- 2.40.1 From 7a5704dd93e23c7126209d1c233f06ab1e4cb0f8 Mon Sep 17 00:00:00 2001 From: prplV Date: Tue, 4 Feb 2025 16:53:29 +0300 Subject: [PATCH 03/44] cli_config_reciever fn added --- noxis-rs/src/options/config.rs | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index cabab29..35ed603 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -15,10 +15,13 @@ use tokio::sync::oneshot::Receiver; // const CONFIG_PATH: &str = "settings.json"; pub mod v2 { + use std::ops::Deref; + use super::*; - pub async fn init_config_mechanism(cli_oneshot: Arc> /*...*/) { /* local + pubsub + cli oneshot check */ } - pub async fn get_pubsub<'a>(params: Arc) -> Option> { - // let config_path = params.config.to_str().unwrap_or_else(|| "settings.json"); + pub async fn init_config_mechanism(cli_oneshot: Arc> /*...*/) { + /* local + pubsub + cli oneshot check */ + } + pub async fn get_redis_connection(params: Arc) -> Option { if 
params.no_sub { return None; } @@ -31,7 +34,7 @@ pub mod v2 { // let channel_name = channel_name.trim(); let mut pubsub = conn.as_pubsub(); if pubsub.subscribe(&channel_name.trim()).is_ok() { - + todo!() } else { @@ -51,9 +54,18 @@ pub mod v2 { None } - pub async fn get_local_config_watcher(/*...*/) { /*...*/ } - // - pub async fn cli_config_reciever(cli_oneshot: Arc>) { /*...*/ } + pub async fn get_local_config_watcher(params: Arc /*...*/) { + /*...*/ + } + + // [:IN-TEST] + pub async fn cli_config_reciever(cli_oneshot: Receiver) -> Option { + /* match awaits til channel*/ + match cli_oneshot.await { + Ok(config_from_cli) => return Some(config_from_cli), + _ => None, + } + } } -- 2.40.1 From 0f160f4dcd4b4ffbe63cb81d2ae619391bc756f4 Mon Sep 17 00:00:00 2001 From: prplV Date: Tue, 4 Feb 2025 17:57:55 +0300 Subject: [PATCH 04/44] deafult processes --- noxis-rs/src/options/structs.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 12c6019..18ae5e9 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -40,7 +40,21 @@ pub struct Processes { pub processes: Vec, } +impl Default for Processes { + fn default() -> Self { + Self { + date_of_creation : String::new(), + config_server : String::from("default"), + processes : Vec::new(), + } + } +} +impl Processes { + pub fn is_default(&self) -> bool { + self.date_of_creation.is_empty() + } +} /// # Struct for the 2nd level in json conf file /// ## for each process to contain info, such as name, path and dependencies -- 2.40.1 From f560dfebc593e49a76975ae5711740fff944c68a Mon Sep 17 00:00:00 2001 From: prplV Date: Tue, 4 Feb 2025 18:34:21 +0300 Subject: [PATCH 05/44] planning +lcr --- noxis-rs/src/options/config.rs | 36 ++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 35ed603..dd53611 100644 
--- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -10,15 +10,21 @@ use std::{env, fs}; use super::preboot::PrebootParams; use tokio::time::{Duration, sleep}; // use redis::PubSub; -use tokio::sync::oneshot::Receiver; +use tokio::sync::oneshot::Receiver as OneShotReciever; +use tokio::sync::broadcast::Sender as BroadcastSender; // const CONFIG_PATH: &str = "settings.json"; pub mod v2 { - use std::ops::Deref; - use super::*; - pub async fn init_config_mechanism(cli_oneshot: Arc> /*...*/) { + + pub async fn init_config_mechanism( + // to handle cli config changes + _cli_oneshot: OneShotReciever, + // to share local config with PRCS and CLI_PIPELINE modules + _brd_tx : Arc> + /*...*/ + ) { /* local + pubsub + cli oneshot check */ } pub async fn get_redis_connection(params: Arc) -> Option { @@ -54,12 +60,30 @@ pub mod v2 { None } - pub async fn get_local_config_watcher(params: Arc /*...*/) { + + // + pub async fn local_config_reciever( + params : Arc, + pubsub_oneshot : OneShotReciever, + brd_tx : Arc>, + /*...*/ + ) { /*...*/ + + // {:1} if local config is not exist -> cannot create watcher -> None + // {:2} if local config exists -> load_processes + // | + // | [Ok(Processes)] + // -> 1) broadcast sending parsed config to PRCS and CLI_PIPELINE + // 2) watcher in loop to deny local changes + // | + // | Err(_) + // -> + // ???? 
} // [:IN-TEST] - pub async fn cli_config_reciever(cli_oneshot: Receiver) -> Option { + pub async fn cli_config_reciever(cli_oneshot: OneShotReciever) -> Option { /* match awaits til channel*/ match cli_oneshot.await { Ok(config_from_cli) => return Some(config_from_cli), -- 2.40.1 From 8c1998c93f03a5c59fb994a7331ea991be2f77a5 Mon Sep 17 00:00:00 2001 From: prplV Date: Wed, 5 Feb 2025 14:47:08 +0300 Subject: [PATCH 06/44] local_config_reciever created with fn's helpers --- noxis-rs/src/options/config.rs | 166 ++++++++++++++++++++++++++------- 1 file changed, 131 insertions(+), 35 deletions(-) diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index dd53611..3deeb80 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -1,6 +1,6 @@ use super::structs::*; use log::{error, info, warn}; -use redis::{Client, Connection, PubSub}; +use redis::{Client, Connection}; use std::fs::OpenOptions; use std::io::Write; use std::os::unix::process::CommandExt; @@ -10,12 +10,19 @@ use std::{env, fs}; use super::preboot::PrebootParams; use tokio::time::{Duration, sleep}; // use redis::PubSub; -use tokio::sync::oneshot::Receiver as OneShotReciever; -use tokio::sync::broadcast::Sender as BroadcastSender; +use tokio::sync::{ + oneshot::{ Receiver as OneShotReciever, Sender as OneShotSender }, + broadcast::Sender as BroadcastSender }; +use crate::utils::files::create_watcher; +use std::fs::File; +use inotify::EventMask; // const CONFIG_PATH: &str = "settings.json"; pub mod v2 { + use core::error; + use std::path::PathBuf; + use super::*; pub async fn init_config_mechanism( @@ -34,62 +41,151 @@ pub mod v2 { let mut connection_delay: u64 = 1; loop { if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { - if let Ok(mut conn) = client.get_connection() { - match crate::utils::get_container_id() { - Some(channel_name) => { - // let channel_name = channel_name.trim(); - let mut pubsub = conn.as_pubsub(); - if 
pubsub.subscribe(&channel_name.trim()).is_ok() { - - todo!() - - } else { - error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name); - } - }, - None => { - error!("Cannot get channel name"); - } - } + if let Ok(conn) = client.get_connection() { + return Some(conn); } } error!("Error with subscribing Redis stream on update. Retrying in {} secs...", connection_delay); sleep(Duration::from_secs(connection_delay)).await; connection_delay *= 2; } + } - None + // loop checking redis pubsub + async fn pubsub_config_reciever( + // to subscribe redis pubsub channel and recieve configs + _redis_connection : Option, + // to stop checking local config + _local_conf_tx: Arc> + ) { + /*...*/ } // - pub async fn local_config_reciever( + async fn local_config_reciever( params : Arc, pubsub_oneshot : OneShotReciever, brd_tx : Arc>, /*...*/ ) { - /*...*/ - - // {:1} if local config is not exist -> cannot create watcher -> None - // {:2} if local config exists -> load_processes - // | - // | [Ok(Processes)] - // -> 1) broadcast sending parsed config to PRCS and CLI_PIPELINE - // 2) watcher in loop to deny local changes - // | - // | Err(_) - // -> - // ???? + /*...*/ + + // borrowing as mut + let mut pubsub_oneshot = pubsub_oneshot; + // fill with default empty config, mut to change later + let mut current_config = Processes::default(); + // PathBuf to &str to work with local config path as slice + let local_config_path = params + .config + .to_str() + .unwrap_or("settings.json"); + + match load_processes(local_config_path) { + // if local exists + Some(conf) => { + info!("Local config `{}` was found.", &conf.date_of_creation); + current_config = conf; + if let Err(er) = brd_tx.send(current_config.clone()) { + error!("Cannot share local config with broadcast due to {}", er); + } + }, + // if local is not exist + None => { + warn!("Local config wasn't found. Waiting for new ..."); + return; + // ... 
+ }, + } + + // 100% local exists here + // create watcher on local config file + match create_watcher("", local_config_path).await { + Ok(mut watcher) => { + loop { + let mut need_to_export_config = false; + // return situations here + // 1) oneshot signal + // 2) if config was deleted -> recreate and fill with current config that is held here + // 3) if config was changed -> fill with current config that is held here + + // catching signal from pubsub + // it's because pubsub mech pulled new valid and actual config and now it's time to ... + // ... overwrite local config file and restart main thread + if let Ok(_) = pubsub_oneshot.try_recv() { + return; + } + + // ! IF NOXIS NEEDS TO RECREATE OR CHANGE LOCAL CONFIG NEED TO DRAIN THIS ACTIVITY ... + // ! ... FROM WATCHER"S BUFFER + + // existing check + if !params.config.exists() { + warn!("Local config file was deleted or moved. Recreating new one with saved data ..."); + need_to_export_config = true; + } else { + // changes check + let mut buffer = [0; 128]; + let events = watcher.read_events(&mut buffer); + if events.is_ok() { + let events: Vec = events + .unwrap() + .map(|mask| mask.mask) + .filter(|mask| { + *mask == EventMask::MODIFY || *mask == EventMask::DELETE_SELF + }) + .collect(); + if !events.is_empty() { + warn!("Local config file was overwritten. 
Discarding changes ..."); + need_to_export_config = true; + } + } + } + // exporting data + if need_to_export_config { + if let Err(er) = export_saved_config_data_locally(¶ms.config, ¤t_config).await { + error!("Cannot save actual imported config due to {}", er); + } else { + // recreation watcher + // if local config file was deleted and recreated + // if local config file was modified locally + match create_watcher("", local_config_path).await { + Ok(new) => watcher = new, + Err(er) => error!("Cannot create new watcher due to {}", er), + } + } + } + sleep(Duration::from_millis(500)).await; + } + }, + Err(_) => { + error!("Cannot create watcher on local config file `{}`. Deinitializing warding local config mechanism...", local_config_path); + }, + } + } // [:IN-TEST] - pub async fn cli_config_reciever(cli_oneshot: OneShotReciever) -> Option { + async fn from_cli_config_reciever(cli_oneshot: OneShotReciever) -> Option { /* match awaits til channel*/ match cli_oneshot.await { Ok(config_from_cli) => return Some(config_from_cli), _ => None, } } + + async fn export_saved_config_data_locally( + config_file_path: &PathBuf, + current_config: &Processes + ) -> anyhow::Result<()> { + + let mut file = File::create_new(config_file_path)?; + Ok( + file.write_all( + serde_json::to_string_pretty(current_config)?.as_bytes() + )? 
+ ) + // Ok(()) + } } -- 2.40.1 From 3d88967281615c3ece0979fd415565d1acb5048a Mon Sep 17 00:00:00 2001 From: prplV Date: Wed, 5 Feb 2025 17:38:41 +0300 Subject: [PATCH 07/44] pubsub_config_reciever + cli-local adj --- noxis-rs/src/main.rs | 22 +++++- noxis-rs/src/options/config.rs | 136 ++++++++++++++++++++++++++++----- 2 files changed, 137 insertions(+), 21 deletions(-) diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index f29d2c0..232d37f 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -14,6 +14,8 @@ use std::time::Duration; use tokio::sync::mpsc; use utils::*; use options::preboot::PrebootParams; +use tokio::sync::{broadcast, oneshot}; +use options::config::v2::init_config_mechanism; #[tokio::main(flavor = "multi_thread")] async fn main() -> anyhow::Result<()>{ @@ -21,7 +23,25 @@ async fn main() -> anyhow::Result<()>{ let _ = setup_logger(); - info!("Runner is configurating..."); + info!("Noxis is configurating..."); + + let (tx_brd, mut _rx_brd) = broadcast::channel::(1); + let (_tx_oneshot, rx_oneshot) = oneshot::channel::(); + let mut handler: Vec> = vec![]; + + let config_module = tokio::spawn(async move { + let _ = init_config_mechanism( + rx_oneshot, + tx_brd, + preboot.clone() + ).await; + }); + + handler.push(config_module); + + for i in handler { + let _ = i.await; + } // setting up redis connection \ // then conf checks to choose the most actual \ diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 3deeb80..95f59ee 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -12,7 +12,7 @@ use tokio::time::{Duration, sleep}; // use redis::PubSub; use tokio::sync::{ oneshot::{ Receiver as OneShotReciever, Sender as OneShotSender }, - broadcast::Sender as BroadcastSender }; + broadcast::Sender as BroadcastSender, broadcast::Receiver as BroadcastReceiver }; use crate::utils::files::create_watcher; use std::fs::File; use inotify::EventMask; @@ -20,18 +20,22 @@ use 
inotify::EventMask; // const CONFIG_PATH: &str = "settings.json"; pub mod v2 { - use core::error; - use std::path::PathBuf; + use std::{fmt::format, path::PathBuf}; + use crate::utils::get_container_id; use super::*; pub async fn init_config_mechanism( // to handle cli config changes _cli_oneshot: OneShotReciever, - // to share local config with PRCS and CLI_PIPELINE modules - _brd_tx : Arc> + // to share local config with PRCS, CLI_PIPELINE and CONFIG modules + brd_tx : BroadcastSender, + // preboot params (args) + params : Arc /*...*/ ) { + // channel for pubsub to handle local config pulling + let _local_config_brd_reciever = brd_tx.subscribe(); /* local + pubsub + cli oneshot check */ } pub async fn get_redis_connection(params: Arc) -> Option { @@ -42,6 +46,7 @@ pub mod v2 { loop { if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { if let Ok(conn) = client.get_connection() { + info!("Successfully opened Redis connection"); return Some(conn); } } @@ -53,27 +58,98 @@ pub mod v2 { // loop checking redis pubsub async fn pubsub_config_reciever( - // to subscribe redis pubsub channel and recieve configs - _redis_connection : Option, // to stop checking local config - _local_conf_tx: Arc> - ) { + local_conf_tx : OneShotSender, + params : Arc, + tx_brd_local : BroadcastReceiver, + ) -> anyhow::Result<()>{ /*...*/ + let mut tx_brd_local = tx_brd_local; + let mut _local_config = Processes::default(); + return match get_redis_connection(params.clone()).await { + Some(mut conn) => { + // + let mut pub_sub = conn.as_pubsub(); + match pub_sub.subscribe(get_container_id().unwrap_or(String::from("default"))) { + Err(er) => { + error!("Cannot subscribe pubsub channel due to {}", &er); + Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er))) + }, + Ok(_) => { + loop { + // brd check + // if let Ok(new_lc) = tx_brd_local.recv().await { + + // } + if !tx_brd_local.is_empty() { + match tx_brd_local.recv().await { + Ok(lc) => 
_local_config = lc, + Err(er) => { + error!("Cannot get imported local config due to {}", &er); + return Err(anyhow::Error::msg( + format!("Cannot get imported local config due to {}", er)) + ) + } + } + } + // pubsub check + if let Ok(msg) = pub_sub.get_message() { + let payload : Result = msg.get_payload(); + match payload { + Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "), + Ok(payload) => { + if let Some(remote) = parse_extern_config(&payload) { + match config_comparing(&_local_config, &remote) { + ConfigActuality::Local => { + warn!("Pulled new config from Redis channel. Current config is more actual ..."); + }, + ConfigActuality::Remote => { + info!("Pulled new actual config from Redis channel, version - `{}`", remote.date_of_creation); + // to stop watching local config file mechanism + let _ = local_conf_tx.send(true); + let config_path = params.config.to_str().unwrap_or("settings.json"); + + if save_new_config(&remote, &config_path).is_err() { + error!("Error with saving new config to {}. Stopping pubsub mechanism...", config_path); + return Err(anyhow::Error::msg( + format!("Error with saving new config to {}. 
Stopping pubsub mechanism...", config_path) + )) + } + return Ok(()); + }, + } + } + else { + warn!("Invalid config was pulled from Redis channel") + } + }, + } + } + // delay + sleep(Duration::from_millis(500)).await; + } + }, + } + }, + None => Err(anyhow::Error::msg("Cannot create Redis connection")) + } } // async fn local_config_reciever( params : Arc, pubsub_oneshot : OneShotReciever, + cli_oneshot : OneShotReciever, brd_tx : Arc>, /*...*/ - ) { + ) -> anyhow::Result<()> { /*...*/ // borrowing as mut let mut pubsub_oneshot = pubsub_oneshot; + let mut cli_oneshot = cli_oneshot; // fill with default empty config, mut to change later - let mut current_config = Processes::default(); + let mut _current_config = Processes::default(); // PathBuf to &str to work with local config path as slice let local_config_path = params .config @@ -84,15 +160,15 @@ pub mod v2 { // if local exists Some(conf) => { info!("Local config `{}` was found.", &conf.date_of_creation); - current_config = conf; - if let Err(er) = brd_tx.send(current_config.clone()) { + _current_config = conf; + if let Err(er) = brd_tx.send(_current_config.clone()) { error!("Cannot share local config with broadcast due to {}", er); } }, // if local is not exist None => { warn!("Local config wasn't found. Waiting for new ..."); - return; + return Err(anyhow::Error::msg("No local config")); // ... }, } @@ -112,7 +188,16 @@ pub mod v2 { // it's because pubsub mech pulled new valid and actual config and now it's time to ... // ... overwrite local config file and restart main thread if let Ok(_) = pubsub_oneshot.try_recv() { - return; + sleep(Duration::from_secs(1)).await; + return Ok(()); + } + + // catching signal from cli + // it's because cli mech pulled new valid and actual config and now it's time to ... + // ... overwrite local config file and restart main thread (like in previous mechanism) + if let Ok(_) = cli_oneshot.try_recv() { + sleep(Duration::from_secs(1)).await; + return Ok(()); } // ! 
IF NOXIS NEEDS TO RECREATE OR CHANGE LOCAL CONFIG NEED TO DRAIN THIS ACTIVITY ... @@ -142,10 +227,10 @@ pub mod v2 { } // exporting data if need_to_export_config { - if let Err(er) = export_saved_config_data_locally(¶ms.config, ¤t_config).await { + if let Err(er) = export_saved_config_data_locally(¶ms.config, &_current_config).await { error!("Cannot save actual imported config due to {}", er); } else { - // recreation watcher + // recreation watcher (draining activity buffer mechanism) // if local config file was deleted and recreated // if local config file was modified locally match create_watcher("", local_config_path).await { @@ -159,16 +244,24 @@ pub mod v2 { }, Err(_) => { error!("Cannot create watcher on local config file `{}`. Deinitializing warding local config mechanism...", local_config_path); + return Err(anyhow::Error::msg("Cannot create watcher on local config file")); }, } } // [:IN-TEST] - async fn from_cli_config_reciever(cli_oneshot: OneShotReciever) -> Option { + async fn from_cli_config_reciever( + cli_oneshot: OneShotReciever, + to_local_tx: OneShotSender + ) -> Option { /* match awaits til channel*/ match cli_oneshot.await { - Ok(config_from_cli) => return Some(config_from_cli), + Ok(config_from_cli) => { + info!("New actual config `{}` from CLI was pulled. 
Saving and restaring ...", &config_from_cli.date_of_creation); + let _ = to_local_tx.send(true); + Some(config_from_cli) + }, _ => None, } } @@ -178,7 +271,7 @@ pub mod v2 { current_config: &Processes ) -> anyhow::Result<()> { - let mut file = File::create_new(config_file_path)?; + let mut file = File::create(config_file_path)?; Ok( file.write_all( serde_json::to_string_pretty(current_config)?.as_bytes() @@ -574,6 +667,9 @@ pub async fn subscribe_config_stream(actual_prcs: Arc, params: Arc ConfigActuality { + if local.is_default() { + return ConfigActuality::Remote; + } let local_date: u64 = local.date_of_creation.parse().unwrap(); let remote_date: u64 = remote.date_of_creation.parse().unwrap(); -- 2.40.1 From a8a7fd8a72f342b2d2105bb5206d0892a4605dd5 Mon Sep 17 00:00:00 2001 From: prplV Date: Wed, 5 Feb 2025 17:50:37 +0300 Subject: [PATCH 08/44] init_config_mechanism 50% --- noxis-rs/src/options/config.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 95f59ee..b55ff33 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -11,6 +11,7 @@ use super::preboot::PrebootParams; use tokio::time::{Duration, sleep}; // use redis::PubSub; use tokio::sync::{ + oneshot, oneshot::{ Receiver as OneShotReciever, Sender as OneShotSender }, broadcast::Sender as BroadcastSender, broadcast::Receiver as BroadcastReceiver }; use crate::utils::files::create_watcher; @@ -36,7 +37,20 @@ pub mod v2 { ) { // channel for pubsub to handle local config pulling let _local_config_brd_reciever = brd_tx.subscribe(); - /* local + pubsub + cli oneshot check */ + // channel between pub-sub mech and local config mech + let (tx_pb_lc, rx_pb_lc) = oneshot::channel::(); + // channel between cli mech and local config mech + let (tx_cli_lc, rx_cli_lc) = oneshot::channel::(); + + // future to init work with local config + let lc_future = local_config_reciever( + params, + 
rx_pb_lc, + rx_cli_lc, + Arc::new(brd_tx) + ); + // TODO! futures + select! + // TODO! tests config } pub async fn get_redis_connection(params: Arc) -> Option { if params.no_sub { -- 2.40.1 From 98da769dd3c116c24989e1c630f84abef09f9073 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 6 Feb 2025 13:50:18 +0300 Subject: [PATCH 09/44] lc check delay decreased --- noxis-rs/settings.json | 4 +- noxis-rs/src/options/config.rs | 73 +++++++++++++++++++++++++++------- 2 files changed, 61 insertions(+), 16 deletions(-) diff --git a/noxis-rs/settings.json b/noxis-rs/settings.json index f830fce..f5fab28 100644 --- a/noxis-rs/settings.json +++ b/noxis-rs/settings.json @@ -1,5 +1,5 @@ { - "dateOfCreation": "1721381809104", + "dateOfCreation": "1721381809106", "configServer": "localhost", "processes": [ { @@ -30,4 +30,4 @@ } } ] -} +} \ No newline at end of file diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index b55ff33..44cebcd 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -21,14 +21,14 @@ use inotify::EventMask; // const CONFIG_PATH: &str = "settings.json"; pub mod v2 { - use std::{fmt::format, path::PathBuf}; + use std::path::PathBuf; use crate::utils::get_container_id; use super::*; pub async fn init_config_mechanism( // to handle cli config changes - _cli_oneshot: OneShotReciever, + cli_oneshot: OneShotReciever, // to share local config with PRCS, CLI_PIPELINE and CONFIG modules brd_tx : BroadcastSender, // preboot params (args) @@ -36,20 +36,53 @@ pub mod v2 { /*...*/ ) { // channel for pubsub to handle local config pulling - let _local_config_brd_reciever = brd_tx.subscribe(); + let local_config_brd_reciever = brd_tx.subscribe(); // channel between pub-sub mech and local config mech let (tx_pb_lc, rx_pb_lc) = oneshot::channel::(); // channel between cli mech and local config mech let (tx_cli_lc, rx_cli_lc) = oneshot::channel::(); + // dbg!("before lc"); + let params_clone = params.clone(); + // future to 
init work with local config - let lc_future = local_config_reciever( - params, - rx_pb_lc, - rx_cli_lc, - Arc::new(brd_tx) - ); - // TODO! futures + select! + let lc_future = tokio::spawn(async move { + // let params = params.clone(); + let _ = local_config_reciever( + params_clone, + rx_pb_lc, + rx_cli_lc, + Arc::new(brd_tx) + ).await; + }); + // dbg!("before pb"); + // future to init work with pub sub mechanism + let pubsub_future = tokio::spawn(async move { + let _ = pubsub_config_reciever( + tx_pb_lc, + params.clone(), + local_config_brd_reciever + ).await; + }); + + // dbg!("before cli"); + // future to catch new configs from cli pipeline + let cli_future = tokio::spawn(async move { + from_cli_config_reciever( + cli_oneshot, + tx_cli_lc + ).await; + + }); + // let _ = lc_future.await; + // dbg!("before select"); + tokio::select! { + lc_result = lc_future => {dbg!("end of lc");}, + pb_result = pubsub_future => {dbg!("end of pb");}, + cli_config_option = cli_future => {dbg!("end of cli");}, + } + // dbg!("after select"); + // TODO! futures + select! [OK] // TODO! 
tests config } pub async fn get_redis_connection(params: Arc) -> Option { @@ -78,18 +111,22 @@ pub mod v2 { tx_brd_local : BroadcastReceiver, ) -> anyhow::Result<()>{ /*...*/ + // dbg!("start of pb"); let mut tx_brd_local = tx_brd_local; let mut _local_config = Processes::default(); return match get_redis_connection(params.clone()).await { Some(mut conn) => { // let mut pub_sub = conn.as_pubsub(); - match pub_sub.subscribe(get_container_id().unwrap_or(String::from("default"))) { + let channel_name = get_container_id().unwrap_or(String::from("default")); + let channel_name = channel_name.trim(); + match pub_sub.subscribe(channel_name) { Err(er) => { error!("Cannot subscribe pubsub channel due to {}", &er); Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er))) }, Ok(_) => { + info!("Successfully subscribed to {} pubsub channel", channel_name); loop { // brd check // if let Ok(new_lc) = tx_brd_local.recv().await { @@ -108,6 +145,7 @@ pub mod v2 { } // pubsub check if let Ok(msg) = pub_sub.get_message() { + // dbg!("ok on get message"); let payload : Result = msg.get_payload(); match payload { Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "), @@ -115,7 +153,7 @@ pub mod v2 { if let Some(remote) = parse_extern_config(&payload) { match config_comparing(&_local_config, &remote) { ConfigActuality::Local => { - warn!("Pulled new config from Redis channel. Current config is more actual ..."); + warn!("Pulled new config from Redis channel, it's outdated. 
Ignoring ..."); }, ConfigActuality::Remote => { info!("Pulled new actual config from Redis channel, version - `{}`", remote.date_of_creation); @@ -140,6 +178,7 @@ pub mod v2 { } } // delay + // dbg!("before sleep pubsub"); sleep(Duration::from_millis(500)).await; } }, @@ -158,7 +197,6 @@ pub mod v2 { /*...*/ ) -> anyhow::Result<()> { /*...*/ - // borrowing as mut let mut pubsub_oneshot = pubsub_oneshot; let mut cli_oneshot = cli_oneshot; @@ -193,6 +231,7 @@ pub mod v2 { Ok(mut watcher) => { loop { let mut need_to_export_config = false; + // let mut need_to_recreate_watcher = false; // return situations here // 1) oneshot signal // 2) if config was deleted -> recreate and fill with current config that is held here @@ -221,6 +260,7 @@ pub mod v2 { if !params.config.exists() { warn!("Local config file was deleted or moved. Recreating new one with saved data ..."); need_to_export_config = true; + // need_to_recreate_watcher = true; } else { // changes check let mut buffer = [0; 128]; @@ -236,6 +276,10 @@ pub mod v2 { if !events.is_empty() { warn!("Local config file was overwritten. Discarding changes ..."); need_to_export_config = true; + // events + // .iter() + // .any(|event| *event == EventMask::DELETE_SELF) + // .then(|| need_to_recreate_watcher = true); } } } @@ -253,7 +297,7 @@ pub mod v2 { } } } - sleep(Duration::from_millis(500)).await; + sleep(Duration::from_millis(300)).await; } }, Err(_) => { @@ -270,6 +314,7 @@ pub mod v2 { to_local_tx: OneShotSender ) -> Option { /* match awaits til channel*/ + dbg!("start of cli"); match cli_oneshot.await { Ok(config_from_cli) => { info!("New actual config `{}` from CLI was pulled. 
Saving and restaring ...", &config_from_cli.date_of_creation); -- 2.40.1 From 6adab1b903eb185486d530b8b956ae89b5e9dae8 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 6 Feb 2025 14:37:17 +0300 Subject: [PATCH 10/44] joinhandlers ending is catching and processing --- noxis-rs/settings.json | 2 +- noxis-rs/src/options/config.rs | 74 ++++++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 9 deletions(-) diff --git a/noxis-rs/settings.json b/noxis-rs/settings.json index f5fab28..2b5f808 100644 --- a/noxis-rs/settings.json +++ b/noxis-rs/settings.json @@ -1,5 +1,5 @@ { - "dateOfCreation": "1721381809106", + "dateOfCreation": "1721381809107", "configServer": "localhost", "processes": [ { diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 44cebcd..88fdbe7 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -44,25 +44,30 @@ pub mod v2 { // dbg!("before lc"); let params_clone = params.clone(); + let for_lc_path = params.clone(); + let lc_path = for_lc_path + .config + .to_str() + .unwrap_or("settings.json"); // future to init work with local config let lc_future = tokio::spawn(async move { // let params = params.clone(); - let _ = local_config_reciever( + local_config_reciever( params_clone, rx_pb_lc, rx_cli_lc, Arc::new(brd_tx) - ).await; + ).await }); // dbg!("before pb"); // future to init work with pub sub mechanism let pubsub_future = tokio::spawn(async move { - let _ = pubsub_config_reciever( + pubsub_config_reciever( tx_pb_lc, params.clone(), local_config_brd_reciever - ).await; + ).await }); // dbg!("before cli"); @@ -71,15 +76,68 @@ pub mod v2 { from_cli_config_reciever( cli_oneshot, tx_cli_lc - ).await; + ).await }); // let _ = lc_future.await; // dbg!("before select"); tokio::select! 
{ - lc_result = lc_future => {dbg!("end of lc");}, - pb_result = pubsub_future => {dbg!("end of pb");}, - cli_config_option = cli_future => {dbg!("end of cli");}, + lc_result = lc_future => { + // dbg!("end of lc"); + match lc_result { + Ok(res) => { + if res.is_ok() { + info!("Local config warding mechanism stopped, waiting for others ..."); + sleep(Duration::from_millis(500)).await; + } + else { + error!("Local config warding mechanism crushed, restarting ..."); + let _ = restart_main_thread(); + } + }, + Err(_) => { + error!("Local config warding mechanism crushed, restarting ..."); + let _ = restart_main_thread(); + }, + } + }, + pb_result = pubsub_future => { + match pb_result { + Ok(res) => { + if res.is_ok() { + info!("New config was saved locally, restarting ..."); + } + else { + error!("Pubsub mechanism crushed, restarting ..."); + } + }, + Err(_) => { + error!("Pubsub mechanism crushed, restarting ..."); + }, + } + let _ = restart_main_thread(); + }, + cli_config_option = cli_future => { + // match cli_config_option { + // Some(config) => {}, + // None => { + // error!("Cli pulling new config mechanism crushed, restarting ...") + // }, + // } + match cli_config_option { + Err(_) => error!("Cli pulling new config mechanism crushed, restarting ..."), + Ok(option_config) => { + match option_config { + None => error!("Cli pulling new config mechanism crushed, restarting ..."), + Some(config) => { + info!("New config was pulled from CLI, saving and restarting ..."); + let _ = save_new_config(&config, lc_path); + }, + } + }, + } + let _ = restart_main_thread(); + }, } // dbg!("after select"); // TODO! futures + select! 
[OK] -- 2.40.1 From e4c3e5f46f3fe5096a271b80adfe3a1c0b7fcee6 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 6 Feb 2025 14:39:32 +0300 Subject: [PATCH 11/44] fix of lc_result ending firstly --- noxis-rs/settings.json | 4 ++-- noxis-rs/src/options/config.rs | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/noxis-rs/settings.json b/noxis-rs/settings.json index 2b5f808..180de41 100644 --- a/noxis-rs/settings.json +++ b/noxis-rs/settings.json @@ -1,5 +1,5 @@ { - "dateOfCreation": "1721381809107", + "dateOfCreation": "1721381809109", "configServer": "localhost", "processes": [ { @@ -30,4 +30,4 @@ } } ] -} \ No newline at end of file +} diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 88fdbe7..4e0dd4b 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -89,6 +89,7 @@ pub mod v2 { if res.is_ok() { info!("Local config warding mechanism stopped, waiting for others ..."); sleep(Duration::from_millis(500)).await; + let _ = restart_main_thread(); } else { error!("Local config warding mechanism crushed, restarting ..."); -- 2.40.1 From 064611823ab1f81fa6319d20487cc9ceeb6711b5 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 6 Feb 2025 15:08:16 +0300 Subject: [PATCH 12/44] ass --- noxis-rs/settings.json | 2 +- noxis-rs/src/options/config.rs | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/noxis-rs/settings.json b/noxis-rs/settings.json index 180de41..ba3eabf 100644 --- a/noxis-rs/settings.json +++ b/noxis-rs/settings.json @@ -1,5 +1,5 @@ { - "dateOfCreation": "1721381809109", + "dateOfCreation": "1721381809110", "configServer": "localhost", "processes": [ { diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 4e0dd4b..1bd106e 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -390,11 +390,10 @@ pub mod v2 { ) -> anyhow::Result<()> { let mut file = File::create(config_file_path)?; - Ok( - file.write_all( - 
serde_json::to_string_pretty(current_config)?.as_bytes() - )? - ) + file.write_all( + serde_json::to_string_pretty(current_config)?.as_bytes() + )?; + Ok(()) // Ok(()) } } -- 2.40.1 From 35a21da43136c230c42d25a645fa24ea10f49b50 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 27 Mar 2025 10:33:37 -0400 Subject: [PATCH 13/44] reding config lc+pubsub rework --- noxis-rs/settings.json | 4 +- noxis-rs/src/options/cli_pipeline.rs | 1 + noxis-rs/src/options/config.rs | 170 +++++++++++++-------------- 3 files changed, 88 insertions(+), 87 deletions(-) diff --git a/noxis-rs/settings.json b/noxis-rs/settings.json index ba3eabf..10485c8 100644 --- a/noxis-rs/settings.json +++ b/noxis-rs/settings.json @@ -1,6 +1,6 @@ { - "dateOfCreation": "1721381809110", - "configServer": "localhost", + "dateOfCreation": "1721381809112", + "configServer": "192.168.2.37", "processes": [ { "name": "temp-process", diff --git a/noxis-rs/src/options/cli_pipeline.rs b/noxis-rs/src/options/cli_pipeline.rs index ad6a670..5456d83 100644 --- a/noxis-rs/src/options/cli_pipeline.rs +++ b/noxis-rs/src/options/cli_pipeline.rs @@ -5,6 +5,7 @@ use tokio::time::{sleep, Duration}; use std::{borrow::BorrowMut, net::{IpAddr, Ipv4Addr}}; // use std::io::BufReader; use tokio::io::{BufReader, AsyncWriteExt, AsyncBufReadExt}; +use tokio::{io::AsyncReadExt, net::UnixListener}; use noxis_cli::Cli; use serde_json::from_str; diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 1bd106e..a1f3e56 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -119,12 +119,6 @@ pub mod v2 { let _ = restart_main_thread(); }, cli_config_option = cli_future => { - // match cli_config_option { - // Some(config) => {}, - // None => { - // error!("Cli pulling new config mechanism crushed, restarting ...") - // }, - // } match cli_config_option { Err(_) => error!("Cli pulling new config mechanism crushed, restarting ..."), Ok(option_config) => { @@ -144,22 +138,20 @@ pub mod v2 { 
// TODO! futures + select! [OK] // TODO! tests config } - pub async fn get_redis_connection(params: Arc) -> Option { - if params.no_sub { - return None; - } - let mut connection_delay: u64 = 1; - loop { - if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { + pub async fn get_redis_connection(params: &str) -> Option { + for i in 1..=3 { + let redis_url = format!("redis://{}/", params); + info!("Trying to connect Redis pubsub `{}`. Attempt {}", &redis_url, i); + if let Ok(client) = Client::open(redis_url) { if let Ok(conn) = client.get_connection() { info!("Successfully opened Redis connection"); return Some(conn); } } - error!("Error with subscribing Redis stream on update. Retrying in {} secs...", connection_delay); - sleep(Duration::from_secs(connection_delay)).await; - connection_delay *= 2; + error!("Error with subscribing Redis stream on update. Retrying in 5 secs..."); + sleep(Duration::from_secs(5)).await; } + None } // loop checking redis pubsub @@ -171,80 +163,88 @@ pub mod v2 { ) -> anyhow::Result<()>{ /*...*/ // dbg!("start of pb"); - let mut tx_brd_local = tx_brd_local; - let mut _local_config = Processes::default(); - return match get_redis_connection(params.clone()).await { - Some(mut conn) => { - // - let mut pub_sub = conn.as_pubsub(); - let channel_name = get_container_id().unwrap_or(String::from("default")); - let channel_name = channel_name.trim(); - match pub_sub.subscribe(channel_name) { - Err(er) => { - error!("Cannot subscribe pubsub channel due to {}", &er); - Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er))) - }, - Ok(_) => { - info!("Successfully subscribed to {} pubsub channel", channel_name); - loop { - // brd check - // if let Ok(new_lc) = tx_brd_local.recv().await { + sleep(Duration::from_secs(1)).await; - // } - if !tx_brd_local.is_empty() { - match tx_brd_local.recv().await { - Ok(lc) => _local_config = lc, - Err(er) => { - error!("Cannot get imported local config due to 
{}", &er); - return Err(anyhow::Error::msg( - format!("Cannot get imported local config due to {}", er)) - ) + let mut tx_brd_local = tx_brd_local; + let mut local_config = Processes::default(); + + for retry in 1..=5 { + if !tx_brd_local.is_empty() { + match tx_brd_local.recv().await { + Ok(lc) => local_config = lc, + Err(er) => { + error!("Cannot get imported local config due to {}", &er); + return Err(anyhow::Error::msg( + format!("Cannot get imported local config due to {}", er)) + ) + } + } + } + match get_redis_connection(&local_config.config_server).await { + Some(mut conn) => { + // + let mut pub_sub = conn.as_pubsub(); + let channel_name = get_container_id().unwrap_or(String::from("default")); + let channel_name = channel_name.trim(); + match pub_sub.subscribe(channel_name) { + Err(er) => { + error!("Cannot subscribe pubsub channel due to {}", &er); + return Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er))) + }, + Ok(_) => { + info!("Successfully subscribed to {} pubsub channel", channel_name); + loop { + // pubsub check + if let Ok(msg) = pub_sub.get_message() { + // dbg!("ok on get message"); + let payload : Result = msg.get_payload(); + match payload { + Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "), + Ok(payload) => { + if let Some(remote) = parse_extern_config(&payload) { + match config_comparing(&local_config, &remote) { + ConfigActuality::Local => { + warn!("Pulled new config from Redis channel, it's outdated. Ignoring ..."); + }, + ConfigActuality::Remote => { + info!("Pulled new actual config from Redis channel, version - `{}`", remote.date_of_creation); + // to stop watching local config file mechanism + let _ = local_conf_tx.send(true); + let config_path = params.config.to_str().unwrap_or("settings.json"); + + if save_new_config(&remote, &config_path).is_err() { + error!("Error with saving new config to {}. 
Stopping pubsub mechanism...", config_path); + return Err(anyhow::Error::msg( + format!("Error with saving new config to {}. Stopping pubsub mechanism...", config_path) + )) + } + return Ok(()); + }, + } + } + else { + warn!("Invalid config was pulled from Redis channel") + } + }, } } + // delay + // dbg!("before sleep pubsub"); + sleep(Duration::from_millis(500)).await; } - // pubsub check - if let Ok(msg) = pub_sub.get_message() { - // dbg!("ok on get message"); - let payload : Result = msg.get_payload(); - match payload { - Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "), - Ok(payload) => { - if let Some(remote) = parse_extern_config(&payload) { - match config_comparing(&_local_config, &remote) { - ConfigActuality::Local => { - warn!("Pulled new config from Redis channel, it's outdated. Ignoring ..."); - }, - ConfigActuality::Remote => { - info!("Pulled new actual config from Redis channel, version - `{}`", remote.date_of_creation); - // to stop watching local config file mechanism - let _ = local_conf_tx.send(true); - let config_path = params.config.to_str().unwrap_or("settings.json"); - - if save_new_config(&remote, &config_path).is_err() { - error!("Error with saving new config to {}. Stopping pubsub mechanism...", config_path); - return Err(anyhow::Error::msg( - format!("Error with saving new config to {}. Stopping pubsub mechanism...", config_path) - )) - } - return Ok(()); - }, - } - } - else { - warn!("Invalid config was pulled from Redis channel") - } - }, - } - } - // delay - // dbg!("before sleep pubsub"); - sleep(Duration::from_millis(500)).await; - } - }, + }, + } + }, + None => { + warn!("Cannot validly connect Redis connection. Blocking task for 20 secs and restarting tries (attempt {})", retry); + sleep(Duration::from_secs(20)).await; } - }, - None => Err(anyhow::Error::msg("Cannot create Redis connection")) + } } + error!("End of retries. 
Stopping pubsub..."); + return Err(anyhow::Error::msg( + format!("End of retries. Stopping pubsub...") + )) } // @@ -373,7 +373,7 @@ pub mod v2 { to_local_tx: OneShotSender ) -> Option { /* match awaits til channel*/ - dbg!("start of cli"); + // dbg!("start of cli"); match cli_oneshot.await { Ok(config_from_cli) => { info!("New actual config `{}` from CLI was pulled. Saving and restaring ...", &config_from_cli.date_of_creation); -- 2.40.1 From 163887d42c6f1414951ab9b9d74442967c10fb86 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 27 Mar 2025 10:46:22 -0400 Subject: [PATCH 14/44] tcp -> unixsocket --- .gitignore | 1 + noxis-rs/src/main.rs | 6 +++- noxis-rs/src/options/cli_pipeline.rs | 51 ++++++++++++++-------------- 3 files changed, 32 insertions(+), 26 deletions(-) diff --git a/.gitignore b/.gitignore index c69fa41..eae2549 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ Cargo.lock hagent_test.sock release +*.sock \ No newline at end of file diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index 232d37f..60e7ded 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -36,9 +36,13 @@ async fn main() -> anyhow::Result<()>{ preboot.clone() ).await; }); - handler.push(config_module); + let cli_module = tokio::spawn(async move { + let _ = init_cli_pipeline().await; + }); + handler.push(cli_module); + for i in handler { let _ = i.await; } diff --git a/noxis-rs/src/options/cli_pipeline.rs b/noxis-rs/src/options/cli_pipeline.rs index 5456d83..2047daa 100644 --- a/noxis-rs/src/options/cli_pipeline.rs +++ b/noxis-rs/src/options/cli_pipeline.rs @@ -1,8 +1,8 @@ use log::{error, info, warn}; -use tokio::net::{TcpListener, TcpStream}; +use tokio::net::{TcpListener, TcpStream, UnixStream}; use anyhow::{Result as DynResult, Error}; use tokio::time::{sleep, Duration}; -use std::{borrow::BorrowMut, net::{IpAddr, Ipv4Addr}}; +use std::{borrow::BorrowMut, fs, net::{IpAddr, Ipv4Addr}}; // use std::io::BufReader; use tokio::io::{BufReader, AsyncWriteExt, 
AsyncBufReadExt}; use tokio::{io::AsyncReadExt, net::UnixListener}; @@ -24,21 +24,20 @@ use serde_json::from_str; /// pub async fn init_cli_pipeline() -> DynResult<()> { match init_listener().await { - Some(list) => { + Ok(list) => { + info!("Successfully opened UnixListener for CLI"); loop { - if let Ok((socket, addr)) = list.accept().await { - // isolation - if IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)) != addr.ip() { - warn!("Declined attempt to connect TCP-socket from {}", addr); - continue; - } + if let Ok((socket, _)) = list.accept().await { process_connection(socket).await; } sleep(Duration::from_millis(500)).await; } // Ok(()) }, - None => Err(Error::msg("Addr 127.0.0.1:7753 is already in use")) + Err(er) => { + error!("Failed to open UnixListener for CLI"); + Err(er) + }, } } @@ -55,17 +54,20 @@ pub async fn init_cli_pipeline() -> DynResult<()> { /// /// *depends on* : `tokio::net::TcpListener` /// -async fn init_listener() -> Option { - match TcpListener::bind("127.0.0.1:7753").await { - Ok(listener) => { - info!("Runner is listening localhost:7753"); - Some(listener) - }, - Err(_) => { - error!("Cannot create TCP listener for CLI"); - None - } - } +async fn init_listener() -> anyhow::Result { + // match TcpListener::bind("127.0.0.1:7753").await { + // Ok(listener) => { + // info!("Runner is listening localhost:7753"); + // Some(listener) + // }, + // Err(_) => { + // error!("Cannot create TCP listener for CLI"); + // None + // } + // } + let socket_path = "noxis-rs"; + let _ = fs::remove_file(socket_path); + Ok(UnixListener::bind(socket_path)?) 
} /// # Fn `process_connection` @@ -81,11 +83,10 @@ async fn init_listener() -> Option { /// /// *depends on* : `tokio::net::TcpStream` /// -async fn process_connection(mut stream: TcpStream) { +async fn process_connection(mut stream: UnixStream) { let buf_reader = BufReader::new(stream.borrow_mut()); let mut rqst = buf_reader.lines(); - - + while let Ok(Some(line)) = rqst.next_line().await { if line.is_empty() { break @@ -102,6 +103,6 @@ async fn process_connection(mut stream: TcpStream) { println!("{}", line); } - let response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\nContent-Type: text/plain\r\n\r\nHello, World!"; + let response = "OK"; stream.write_all(response.as_bytes()).await.unwrap(); } -- 2.40.1 From 026a5020443de90ad98e57363fb8f97a1d1014dc Mon Sep 17 00:00:00 2001 From: prplV Date: Fri, 28 Mar 2025 05:13:43 -0400 Subject: [PATCH 15/44] us changes --- noxis-cli/src/cli.rs | 6 +++ noxis-cli/src/cli_error.rs | 9 ++-- noxis-cli/src/cli_net.rs | 42 +++++++++--------- noxis-cli/src/main.rs | 4 +- noxis-rs/src/main.rs | 1 - noxis-rs/src/options/cli_pipeline.rs | 64 ++++++++++++++++------------ 6 files changed, 69 insertions(+), 57 deletions(-) diff --git a/noxis-cli/src/cli.rs b/noxis-cli/src/cli.rs index 6c07db6..b96e85f 100644 --- a/noxis-cli/src/cli.rs +++ b/noxis-cli/src/cli.rs @@ -2,6 +2,12 @@ use clap::{Parser, Subcommand}; #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] pub struct Cli { + #[arg( + short, + default_value="noxis-rs.sock", + help="explicit specify of NOXIS Socket file" + )] + pub socket : String, #[command( subcommand, help = "to manage Noxis work", diff --git a/noxis-cli/src/cli_error.rs b/noxis-cli/src/cli_error.rs index bb3a8bd..7589738 100644 --- a/noxis-cli/src/cli_error.rs +++ b/noxis-cli/src/cli_error.rs @@ -1,14 +1,15 @@ use thiserror::Error; -use super::cli_net::NOXIS_RS_CREDS; #[derive(Debug, Error)] pub enum NoxisCliError { - #[error("Can't send any data to {:?}. 
Noxis-rs daemon is disabled or can't be accessed", NOXIS_RS_CREDS)] - NoxisDaemonMissing, - #[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's web-functionality")] + #[error("Can't find socket `{0}`. Noxis-rs daemon is disabled or can't be accessed using Unix-Socket")] + NoxisDaemonMissing(String), + #[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's runtime!")] PortIsNotWritable, #[error("Can't send Cli-prompt to the Noxis-rs. Check it's state")] CliPromptCanNotBeSent, #[error("Can't parse CLI struct and send as byte stream")] ToStringCliParsingParsing, + #[error("Can't read Noxis response")] + CliResponseReadError } \ No newline at end of file diff --git a/noxis-cli/src/cli_net.rs b/noxis-cli/src/cli_net.rs index b0fbfe7..f148849 100644 --- a/noxis-cli/src/cli_net.rs +++ b/noxis-cli/src/cli_net.rs @@ -1,32 +1,30 @@ -use tokio::net::TcpStream; -use tokio::io::AsyncWriteExt; +use tokio::net::UnixStream; +use tokio::io::{AsyncWriteExt, AsyncReadExt}; use tokio::time::{Duration, sleep}; use anyhow::Result; use super::Cli; use super::cli_error::NoxisCliError; -pub const NOXIS_RS_CREDS: &str = "127.0.0.1:7753"; - - -pub async fn create_tcp_stream() -> Result { - Ok(TcpStream::connect(NOXIS_RS_CREDS).await.map_err(|_| NoxisCliError::NoxisDaemonMissing)?) +async fn create_us_stream(cli: &Cli) -> Result { + Ok(UnixStream::connect(&cli.socket).await.map_err(|_| NoxisCliError::NoxisDaemonMissing((&cli.socket).to_string()))?) 
} -pub async fn try_send(stream: Result, params: Cli) -> Result<()> { - use serde_json::to_string; - let mut stream = stream.map_err(|_| NoxisCliError::NoxisDaemonMissing)?; - loop { - if stream.writable().await.is_err() { - sleep(Duration::from_millis(100)).await; - continue; - } - // let msg: Cli = from_str(&format!("{:?}", params))?; - let msg= to_string(¶ms).map_err(|_| NoxisCliError::ToStringCliParsingParsing)?; - // let msg = r"HTTP/1.1 POST\r\nContent-Length: 14\r\nContent-Type: text/plain\r\n\r\nHello, World!@"; +pub async fn try_send(cli: Cli) -> Result<()> { + // let stream = create_us_stream(&cli).await; + let mut stream = create_us_stream(&cli).await?; - stream.write_all(msg.as_bytes()).await.map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?; - // ... - break; - } + let msg = serde_json::to_vec(&cli) + .map_err(|_| NoxisCliError::ToStringCliParsingParsing)?; + + stream.write_all(&msg).await + .map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?; + + stream.shutdown().await?; + + let mut response = Vec::new(); + stream.read_to_end(&mut response).await + .map_err(|_| NoxisCliError::CliResponseReadError)?; + + println!("Received response: {}", String::from_utf8_lossy(&response)); Ok(()) } \ No newline at end of file diff --git a/noxis-cli/src/main.rs b/noxis-cli/src/main.rs index 9262502..7961b75 100644 --- a/noxis-cli/src/main.rs +++ b/noxis-cli/src/main.rs @@ -4,12 +4,12 @@ mod cli_error; use clap::Parser; use cli::Cli; -use cli_net::{create_tcp_stream, try_send}; +use cli_net::try_send; use anyhow::Result; #[tokio::main] async fn main() -> Result<()>{ let cli = Cli::parse(); - try_send(create_tcp_stream().await, cli).await?; + try_send(cli).await?; Ok(()) } diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index 60e7ded..951eadf 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -1,7 +1,6 @@ mod options; mod utils; -use anyhow::Error; use clap::Parser; use log::{error, info}; use options::config::*; diff --git 
a/noxis-rs/src/options/cli_pipeline.rs b/noxis-rs/src/options/cli_pipeline.rs index 2047daa..96233e9 100644 --- a/noxis-rs/src/options/cli_pipeline.rs +++ b/noxis-rs/src/options/cli_pipeline.rs @@ -1,13 +1,11 @@ -use log::{error, info, warn}; -use tokio::net::{TcpListener, TcpStream, UnixStream}; -use anyhow::{Result as DynResult, Error}; +use log::{error, info}; +use tokio::net::{ UnixStream, UnixListener }; +use anyhow::Result as DynResult; use tokio::time::{sleep, Duration}; -use std::{borrow::BorrowMut, fs, net::{IpAddr, Ipv4Addr}}; +use std::fs; // use std::io::BufReader; -use tokio::io::{BufReader, AsyncWriteExt, AsyncBufReadExt}; -use tokio::{io::AsyncReadExt, net::UnixListener}; +use tokio::io::{ AsyncWriteExt, AsyncReadExt}; use noxis_cli::Cli; -use serde_json::from_str; /// # Fn `init_cli_pipeline` /// ## for catching all input requests from CLI @@ -25,12 +23,19 @@ use serde_json::from_str; pub async fn init_cli_pipeline() -> DynResult<()> { match init_listener().await { Ok(list) => { - info!("Successfully opened UnixListener for CLI"); + // TODO: remove `unwrap`s + info!("Listening on {}", &list.local_addr()?.as_pathname().unwrap().display()); loop { - if let Ok((socket, _)) = list.accept().await { + + if let Ok((socket, a)) = list.accept().await { + info!("CLI connection from {}", a.as_pathname().unwrap().display()); process_connection(socket).await; + } else { + error!("Cannot poll connection to CLI"); } - sleep(Duration::from_millis(500)).await; + dbg!(1); + sleep(Duration::from_millis(300)).await; + } // Ok(()) }, @@ -65,7 +70,7 @@ async fn init_listener() -> anyhow::Result { // None // } // } - let socket_path = "noxis-rs"; + let socket_path = "noxis-rs.sock"; let _ = fs::remove_file(socket_path); Ok(UnixListener::bind(socket_path)?) 
} @@ -84,25 +89,28 @@ async fn init_listener() -> anyhow::Result { /// *depends on* : `tokio::net::TcpStream` /// async fn process_connection(mut stream: UnixStream) { - let buf_reader = BufReader::new(stream.borrow_mut()); - let mut rqst = buf_reader.lines(); + info!("Processing new connection"); - while let Ok(Some(line)) = rqst.next_line().await { - if line.is_empty() { - break + let mut buf = Vec::new(); + match stream.read_to_end(&mut buf).await { + Ok(_) => { + match serde_json::from_slice::(&buf) { + Ok(cli) => { + info!("Received CLI request: {:?}", cli); + // Обработка запроса + let response = "OK"; + if let Err(e) = stream.write_all(response.as_bytes()).await { + error!("Failed to send response: {}", e); + } + } + Err(e) => { + error!("Failed to parse CLI request: {}", e); + } + } } - match from_str::(&line) { - Ok(req) => { - // TODO: func wrapper - dbg!(req); - }, - Err(_) => { - break - }, + Err(e) => { + error!("Failed to read from socket: {}", e); } - println!("{}", line); } - - let response = "OK"; - stream.write_all(response.as_bytes()).await.unwrap(); + let _ = stream.shutdown().await; } -- 2.40.1 From 27e79ce731c4030c527f69f78997a836a5a7cab6 Mon Sep 17 00:00:00 2001 From: prplV Date: Fri, 28 Mar 2025 07:35:07 -0400 Subject: [PATCH 16/44] us update 2 --- noxis-cli/src/cli_net.rs | 4 +- noxis-rs/src/options/cli_pipeline.rs | 56 +++++++--------------------- 2 files changed, 15 insertions(+), 45 deletions(-) diff --git a/noxis-cli/src/cli_net.rs b/noxis-cli/src/cli_net.rs index f148849..7bb0178 100644 --- a/noxis-cli/src/cli_net.rs +++ b/noxis-cli/src/cli_net.rs @@ -16,13 +16,13 @@ pub async fn try_send(cli: Cli) -> Result<()> { let msg = serde_json::to_vec(&cli) .map_err(|_| NoxisCliError::ToStringCliParsingParsing)?; - stream.write_all(&msg).await + stream.try_write(&msg) .map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?; stream.shutdown().await?; let mut response = Vec::new(); - stream.read_to_end(&mut response).await + stream.read(&mut 
response).await .map_err(|_| NoxisCliError::CliResponseReadError)?; println!("Received response: {}", String::from_utf8_lossy(&response)); diff --git a/noxis-rs/src/options/cli_pipeline.rs b/noxis-rs/src/options/cli_pipeline.rs index 96233e9..1c85dae 100644 --- a/noxis-rs/src/options/cli_pipeline.rs +++ b/noxis-rs/src/options/cli_pipeline.rs @@ -3,7 +3,6 @@ use tokio::net::{ UnixStream, UnixListener }; use anyhow::Result as DynResult; use tokio::time::{sleep, Duration}; use std::fs; -// use std::io::BufReader; use tokio::io::{ AsyncWriteExt, AsyncReadExt}; use noxis_cli::Cli; @@ -21,17 +20,20 @@ use noxis_cli::Cli; /// *depends on* : - /// pub async fn init_cli_pipeline() -> DynResult<()> { - match init_listener().await { + let socket_path = "noxis-rs.sock"; + let _ = fs::remove_file(socket_path); + + match UnixListener::bind(socket_path) { Ok(list) => { // TODO: remove `unwrap`s info!("Listening on {}", &list.local_addr()?.as_pathname().unwrap().display()); loop { - - if let Ok((socket, a)) = list.accept().await { - info!("CLI connection from {}", a.as_pathname().unwrap().display()); - process_connection(socket).await; - } else { - error!("Cannot poll connection to CLI"); + match list.accept().await { + Ok((socket, addr)) => { + info!("CLI connection from {}", addr.as_pathname().unwrap().display()); + process_connection(socket).await; + }, + Err(er) => error!("Cannot poll connection to CLI due to {}", er), } dbg!(1); sleep(Duration::from_millis(300)).await; @@ -41,40 +43,11 @@ pub async fn init_cli_pipeline() -> DynResult<()> { }, Err(er) => { error!("Failed to open UnixListener for CLI"); - Err(er) + Err(er.into()) }, } } -/// # Fn `init_listener` -/// ## for creating TCP-listener for communicating with CLI -/// -/// *input* : - -/// -/// *output* : `Some` if port 7753 was opened | None if not -/// -/// *initiator* : fn `init_cli_pipeline` -/// -/// *managing* : `TcpListener` object to handle requests -/// -/// *depends on* : `tokio::net::TcpListener` -/// -async 
fn init_listener() -> anyhow::Result { - // match TcpListener::bind("127.0.0.1:7753").await { - // Ok(listener) => { - // info!("Runner is listening localhost:7753"); - // Some(listener) - // }, - // Err(_) => { - // error!("Cannot create TCP listener for CLI"); - // None - // } - // } - let socket_path = "noxis-rs.sock"; - let _ = fs::remove_file(socket_path); - Ok(UnixListener::bind(socket_path)?) -} - /// # Fn `process_connection` /// ## for processing input CLI requests /// @@ -88,16 +61,13 @@ async fn init_listener() -> anyhow::Result { /// /// *depends on* : `tokio::net::TcpStream` /// -async fn process_connection(mut stream: UnixStream) { - info!("Processing new connection"); - +async fn process_connection(mut stream: UnixStream) { let mut buf = Vec::new(); - match stream.read_to_end(&mut buf).await { + match stream.read(&mut buf).await { Ok(_) => { match serde_json::from_slice::(&buf) { Ok(cli) => { info!("Received CLI request: {:?}", cli); - // Обработка запроса let response = "OK"; if let Err(e) = stream.write_all(response.as_bytes()).await { error!("Failed to send response: {}", e); -- 2.40.1 From 011c479550087251e01f34f94097436320749a34 Mon Sep 17 00:00:00 2001 From: prplV Date: Mon, 31 Mar 2025 09:46:22 -0400 Subject: [PATCH 17/44] big async optimization --- noxis-cli/src/cli_error.rs | 8 +- noxis-cli/src/cli_net.rs | 14 +- noxis-rs/Cargo.toml | 2 +- noxis-rs/src/main.rs | 16 ++- noxis-rs/src/options/cli_pipeline.rs | 35 ++--- noxis-rs/src/options/config.rs | 183 +++++++++++++-------------- 6 files changed, 134 insertions(+), 124 deletions(-) diff --git a/noxis-cli/src/cli_error.rs b/noxis-cli/src/cli_error.rs index 7589738..d5bae9b 100644 --- a/noxis-cli/src/cli_error.rs +++ b/noxis-cli/src/cli_error.rs @@ -2,14 +2,14 @@ use thiserror::Error; #[derive(Debug, Error)] pub enum NoxisCliError { - #[error("Can't find socket `{0}`. 
Noxis-rs daemon is disabled or can't be accessed using Unix-Socket")] - NoxisDaemonMissing(String), + #[error("Can't find socket `{0}`. Error : {1}")] + NoxisDaemonMissing(String, String), #[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's runtime!")] PortIsNotWritable, #[error("Can't send Cli-prompt to the Noxis-rs. Check it's state")] CliPromptCanNotBeSent, #[error("Can't parse CLI struct and send as byte stream")] ToStringCliParsingParsing, - #[error("Can't read Noxis response")] - CliResponseReadError + #[error("Can't read Noxis response due to {0}")] + CliResponseReadError(String) } \ No newline at end of file diff --git a/noxis-cli/src/cli_net.rs b/noxis-cli/src/cli_net.rs index 7bb0178..a3300ed 100644 --- a/noxis-cli/src/cli_net.rs +++ b/noxis-cli/src/cli_net.rs @@ -6,7 +6,7 @@ use super::Cli; use super::cli_error::NoxisCliError; async fn create_us_stream(cli: &Cli) -> Result { - Ok(UnixStream::connect(&cli.socket).await.map_err(|_| NoxisCliError::NoxisDaemonMissing((&cli.socket).to_string()))?) + Ok(UnixStream::connect(&cli.socket).await.map_err(|er| NoxisCliError::NoxisDaemonMissing((&cli.socket).to_string(), er.to_string()))?) 
} pub async fn try_send(cli: Cli) -> Result<()> { @@ -16,14 +16,14 @@ pub async fn try_send(cli: Cli) -> Result<()> { let msg = serde_json::to_vec(&cli) .map_err(|_| NoxisCliError::ToStringCliParsingParsing)?; - stream.try_write(&msg) + stream.write_all(&msg) + .await .map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?; - stream.shutdown().await?; - - let mut response = Vec::new(); - stream.read(&mut response).await - .map_err(|_| NoxisCliError::CliResponseReadError)?; + let mut response = [0; 1024]; + stream.read(&mut response) + .await + .map_err(|er| NoxisCliError::CliResponseReadError(er.to_string()))?; println!("Received response: {}", String::from_utf8_lossy(&response)); Ok(()) diff --git a/noxis-rs/Cargo.toml b/noxis-rs/Cargo.toml index 0745074..86cf9bd 100644 --- a/noxis-rs/Cargo.toml +++ b/noxis-rs/Cargo.toml @@ -11,7 +11,7 @@ env_logger = "0.11.3" inotify = "0.10.2" log = "0.4.22" pcap = "2.2.0" -redis = "0.25.4" +redis = "0.29.2" serde = { version = "1.0.203", features = ["derive"] } serde_json = "1.0.118" sysinfo = "0.32.0" diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index 951eadf..7341bac 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -16,7 +16,7 @@ use options::preboot::PrebootParams; use tokio::sync::{broadcast, oneshot}; use options::config::v2::init_config_mechanism; -#[tokio::main(flavor = "multi_thread")] +#[tokio::main(flavor = "multi_thread", worker_threads = 4)] async fn main() -> anyhow::Result<()>{ let preboot = Arc::new(PrebootParams::parse().validate()?); @@ -36,12 +36,22 @@ async fn main() -> anyhow::Result<()>{ ).await; }); handler.push(config_module); - + let cli_module = tokio::spawn(async move { - let _ = init_cli_pipeline().await; + if let Err(er) = init_cli_pipeline().await { + error!("CLI pipeline failed due to {}", er) + } }); handler.push(cli_module); + let ctrlc = tokio::spawn(async move { + if let Err(er) = set_valid_destructor(vec![].into()).await { + error!("CTRLC mod failed!"); + } + 
std::process::exit(0); + }); + handler.push(ctrlc); + for i in handler { let _ = i.await; } diff --git a/noxis-rs/src/options/cli_pipeline.rs b/noxis-rs/src/options/cli_pipeline.rs index 1c85dae..c31df72 100644 --- a/noxis-rs/src/options/cli_pipeline.rs +++ b/noxis-rs/src/options/cli_pipeline.rs @@ -1,8 +1,8 @@ -use log::{error, info}; +use log::{error, info, warn}; use tokio::net::{ UnixStream, UnixListener }; use anyhow::Result as DynResult; use tokio::time::{sleep, Duration}; -use std::fs; +use std::{fs, io::{Read, Write}, os::fd::AsFd, path::Path}; use tokio::io::{ AsyncWriteExt, AsyncReadExt}; use noxis_cli::Cli; @@ -20,24 +20,24 @@ use noxis_cli::Cli; /// *depends on* : - /// pub async fn init_cli_pipeline() -> DynResult<()> { - let socket_path = "noxis-rs.sock"; + let socket_path = "noxis.sock"; let _ = fs::remove_file(socket_path); match UnixListener::bind(socket_path) { Ok(list) => { // TODO: remove `unwrap`s - info!("Listening on {}", &list.local_addr()?.as_pathname().unwrap().display()); + info!("Listening on {}", socket_path); loop { match list.accept().await { - Ok((socket, addr)) => { - info!("CLI connection from {}", addr.as_pathname().unwrap().display()); + Ok((socket, _)) => { + // tokio::spawn(); process_connection(socket).await; }, - Err(er) => error!("Cannot poll connection to CLI due to {}", er), + Err(er) => { + error!("Cannot poll connection to CLI due to {}", er); + sleep(Duration::from_millis(300)).await; + }, } - dbg!(1); - sleep(Duration::from_millis(300)).await; - } // Ok(()) }, @@ -62,9 +62,14 @@ pub async fn init_cli_pipeline() -> DynResult<()> { /// *depends on* : `tokio::net::TcpStream` /// async fn process_connection(mut stream: UnixStream) { - let mut buf = Vec::new(); + let mut buf = vec![0; 1024]; match stream.read(&mut buf).await { - Ok(_) => { + Ok(0) => { + info!("Client disconnected "); + }, + Ok(n) => { + buf.truncate(n); + info!("CLI have sent {} bytes", n); match serde_json::from_slice::(&buf) { Ok(cli) => { 
info!("Received CLI request: {:?}", cli); @@ -77,10 +82,8 @@ async fn process_connection(mut stream: UnixStream) { error!("Failed to parse CLI request: {}", e); } } - } - Err(e) => { - error!("Failed to read from socket: {}", e); - } + }, + Err(e) => error!("Failed to read from socket: {}", e), } let _ = stream.shutdown().await; } diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index a1f3e56..9c2e69f 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -51,34 +51,34 @@ pub mod v2 { .unwrap_or("settings.json"); // future to init work with local config - let lc_future = tokio::spawn(async move { + let lc_future = tokio::spawn( // let params = params.clone(); local_config_reciever( params_clone, rx_pb_lc, rx_cli_lc, Arc::new(brd_tx) - ).await - }); + ) + ); // dbg!("before pb"); // future to init work with pub sub mechanism - let pubsub_future = tokio::spawn(async move { + let pubsub_future = tokio::spawn( pubsub_config_reciever( tx_pb_lc, params.clone(), local_config_brd_reciever - ).await - }); + ) + ); // dbg!("before cli"); // future to catch new configs from cli pipeline - let cli_future = tokio::spawn(async move { + let cli_future = tokio::spawn( from_cli_config_reciever( cli_oneshot, tx_cli_lc - ).await + ) - }); + ); // let _ = lc_future.await; // dbg!("before select"); tokio::select! 
{ @@ -120,10 +120,10 @@ pub mod v2 { }, cli_config_option = cli_future => { match cli_config_option { - Err(_) => error!("Cli pulling new config mechanism crushed, restarting ..."), + Err(_) => error!("CLI pulling new config mechanism crushed, restarting ..."), Ok(option_config) => { match option_config { - None => error!("Cli pulling new config mechanism crushed, restarting ..."), + None => error!("CLI pulling new config mechanism crushed, restarting ..."), Some(config) => { info!("New config was pulled from CLI, saving and restarting ..."); let _ = save_new_config(&config, lc_path); @@ -163,88 +163,78 @@ pub mod v2 { ) -> anyhow::Result<()>{ /*...*/ // dbg!("start of pb"); - sleep(Duration::from_secs(1)).await; - let mut tx_brd_local = tx_brd_local; - let mut local_config = Processes::default(); - - for retry in 1..=5 { - if !tx_brd_local.is_empty() { - match tx_brd_local.recv().await { - Ok(lc) => local_config = lc, - Err(er) => { - error!("Cannot get imported local config due to {}", &er); - return Err(anyhow::Error::msg( - format!("Cannot get imported local config due to {}", er)) - ) - } - } + let local_config = if !tx_brd_local.is_empty() { + tx_brd_local.recv().await? 
+ } else { + // Processes::default() + let mut tick = tokio::time::interval(Duration::from_millis(500)); + loop { + tick.tick().await; + break match tx_brd_local.recv().await { + Ok(conf) => conf, + Err(_) => continue, + }; } - match get_redis_connection(&local_config.config_server).await { - Some(mut conn) => { - // - let mut pub_sub = conn.as_pubsub(); - let channel_name = get_container_id().unwrap_or(String::from("default")); - let channel_name = channel_name.trim(); - match pub_sub.subscribe(channel_name) { - Err(er) => { - error!("Cannot subscribe pubsub channel due to {}", &er); - return Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er))) - }, - Ok(_) => { - info!("Successfully subscribed to {} pubsub channel", channel_name); - loop { - // pubsub check - if let Ok(msg) = pub_sub.get_message() { - // dbg!("ok on get message"); - let payload : Result = msg.get_payload(); - match payload { - Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "), - Ok(payload) => { - if let Some(remote) = parse_extern_config(&payload) { - match config_comparing(&local_config, &remote) { - ConfigActuality::Local => { - warn!("Pulled new config from Redis channel, it's outdated. Ignoring ..."); - }, - ConfigActuality::Remote => { - info!("Pulled new actual config from Redis channel, version - `{}`", remote.date_of_creation); - // to stop watching local config file mechanism - let _ = local_conf_tx.send(true); - let config_path = params.config.to_str().unwrap_or("settings.json"); - - if save_new_config(&remote, &config_path).is_err() { - error!("Error with saving new config to {}. Stopping pubsub mechanism...", config_path); - return Err(anyhow::Error::msg( - format!("Error with saving new config to {}. 
Stopping pubsub mechanism...", config_path) - )) - } - return Ok(()); - }, - } + }; + match get_redis_connection(&local_config.config_server).await { + Some(mut conn) => { + let mut pub_sub = conn.as_pubsub(); + let channel_name = get_container_id().unwrap_or(String::from("default")); + let channel_name = channel_name.trim(); + match pub_sub.subscribe(channel_name) { + Err(er) => { + error!("Cannot subscribe pubsub channel due to {}", &er); + return Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er))) + }, + Ok(_) => { + info!("Successfully subscribed to {} pubsub channel", channel_name); + let _ = pub_sub.set_read_timeout(Some(Duration::from_secs(3))); + loop { + if let Ok(msg) = pub_sub.get_message() { + // dbg!("ok on get message"); + let payload : Result = msg.get_payload(); + match payload { + Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "), + Ok(payload) => { + if let Some(remote) = parse_extern_config(&payload) { + match config_comparing(&local_config, &remote) { + ConfigActuality::Local => { + warn!("Pulled new config from Redis channel, it's outdated. Ignoring ..."); + }, + ConfigActuality::Remote => { + info!("Pulled new actual config from Redis channel, version - `{}`", remote.date_of_creation); + // to stop watching local config file mechanism + let _ = local_conf_tx.send(true); + let config_path = params.config.to_str().unwrap_or("settings.json"); + + if save_new_config(&remote, &config_path).is_err() { + error!("Error with saving new config to {}. Stopping pubsub mechanism...", config_path); + return Err(anyhow::Error::msg( + format!("Error with saving new config to {}. 
Stopping pubsub mechanism...", config_path) + )) + } + return Ok(()); + }, } - else { - warn!("Invalid config was pulled from Redis channel") - } - }, - } + } + else { + warn!("Invalid config was pulled from Redis channel") + } + }, } - // delay - // dbg!("before sleep pubsub"); - sleep(Duration::from_millis(500)).await; } - }, - } - }, - None => { - warn!("Cannot validly connect Redis connection. Blocking task for 20 secs and restarting tries (attempt {})", retry); - sleep(Duration::from_secs(20)).await; + // delay + tokio::task::yield_now().await; + } + }, } + }, + None => { + sleep(Duration::from_secs(20)).await; } } - error!("End of retries. Stopping pubsub..."); - return Err(anyhow::Error::msg( - format!("End of retries. Stopping pubsub...") - )) + Ok(()) } // @@ -256,7 +246,7 @@ pub mod v2 { /*...*/ ) -> anyhow::Result<()> { /*...*/ - // borrowing as mut + // shadowing as mut let mut pubsub_oneshot = pubsub_oneshot; let mut cli_oneshot = cli_oneshot; // fill with default empty config, mut to change later @@ -357,6 +347,7 @@ pub mod v2 { } } sleep(Duration::from_millis(300)).await; + // tokio::task::yield_now().await; } }, Err(_) => { @@ -374,14 +365,20 @@ pub mod v2 { ) -> Option { /* match awaits til channel*/ // dbg!("start of cli"); - match cli_oneshot.await { - Ok(config_from_cli) => { - info!("New actual config `{}` from CLI was pulled. Saving and restaring ...", &config_from_cli.date_of_creation); - let _ = to_local_tx.send(true); - Some(config_from_cli) - }, - _ => None, + loop { + if !cli_oneshot.is_empty() { + match cli_oneshot.await { + Ok(config_from_cli) => { + info!("New actual config `{}` from CLI was pulled. 
Saving and restaring ...", &config_from_cli.date_of_creation); + let _ = to_local_tx.send(true); + return Some(config_from_cli) + }, + _ => return None, + } + } + sleep(Duration::from_millis(300)).await; } + } async fn export_saved_config_data_locally( -- 2.40.1 From 8f1214bd9a4149a58e11aa9a094a660bea53d10a Mon Sep 17 00:00:00 2001 From: prplV Date: Mon, 31 Mar 2025 10:06:18 -0400 Subject: [PATCH 18/44] refactor --- noxis-rs/src/options/cli_pipeline.rs | 7 +++---- noxis-rs/src/options/signals.rs | 2 +- noxis-rs/src/utils/files.rs | 12 ++++++------ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/noxis-rs/src/options/cli_pipeline.rs b/noxis-rs/src/options/cli_pipeline.rs index c31df72..0c13e22 100644 --- a/noxis-rs/src/options/cli_pipeline.rs +++ b/noxis-rs/src/options/cli_pipeline.rs @@ -1,8 +1,7 @@ -use log::{error, info, warn}; +use log::{error, info}; use tokio::net::{ UnixStream, UnixListener }; -use anyhow::Result as DynResult; use tokio::time::{sleep, Duration}; -use std::{fs, io::{Read, Write}, os::fd::AsFd, path::Path}; +use std::fs; use tokio::io::{ AsyncWriteExt, AsyncReadExt}; use noxis_cli::Cli; @@ -19,7 +18,7 @@ use noxis_cli::Cli; /// /// *depends on* : - /// -pub async fn init_cli_pipeline() -> DynResult<()> { +pub async fn init_cli_pipeline() -> anyhow::Result<()> { let socket_path = "noxis.sock"; let _ = fs::remove_file(socket_path); diff --git a/noxis-rs/src/options/signals.rs b/noxis-rs/src/options/signals.rs index 7604bde..f840510 100644 --- a/noxis-rs/src/options/signals.rs +++ b/noxis-rs/src/options/signals.rs @@ -22,7 +22,7 @@ type SendersVec = Arc>>>; /// /// *depends on* : Sig, Signals /// -pub async fn set_valid_destructor(senders: SendersVec) -> Result<(), CustomError> { +pub async fn set_valid_destructor(senders: SendersVec) -> anyhow::Result<()> { let (mut int, mut term, mut stop) = ( Sig::new(Signals::Sigint, senders.clone()), Sig::new(Signals::Sigterm, senders.clone()), diff --git a/noxis-rs/src/utils/files.rs 
b/noxis-rs/src/utils/files.rs index 639ced2..3d0dade 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -20,7 +20,7 @@ use tokio::time::Duration; /// /// *depends on* : - /// -pub async fn create_watcher(filename: &str, path: &str) -> Result { +pub async fn create_watcher(filename: &str, path: &str) -> anyhow::Result { let src = format!("{}{}", path, filename); let inotify: Inotify = Inotify::init()?; inotify.watches().add(&src, WatchMask::ALL_EVENTS)?; @@ -45,12 +45,12 @@ pub async fn file_handler( files: &[Files], tx: Arc>, watchers: Arc>>, -) -> Result<(), CustomError> { +) -> anyhow::Result<()> { for (i, file) in files.iter().enumerate() { // let src = format!("{}{}", file.src, file.filename); if check_file(&file.filename, &file.src).await.is_err() { if !is_active(name).await || is_frozen(name).await { - return Err(CustomError::Fatal); + return Err(anyhow::Error::msg("Process is frozen or stopped")); } match file.triggers.on_delete.as_str() { "stay" => { @@ -61,18 +61,18 @@ pub async fn file_handler( if is_active(name).await { tx.send(1).await.unwrap(); } - return Err(CustomError::Fatal); + return Err(anyhow::Error::msg("Process was stopped")); } "hold" => { if is_active(name).await { tx.send(2).await.unwrap(); - return Err(CustomError::Fatal); + return Err(anyhow::Error::msg("Process was frozen")); } } _ => { tokio::time::sleep(Duration::from_millis(50)).await; tx.send(101).await.unwrap(); - return Err(CustomError::Fatal); + return Err(anyhow::Error::msg("Impermissible character or word in file trigger")); } } } else if is_active(name).await && !is_frozen(name).await { -- 2.40.1 From 886ae6308bab05fd95e032b60cedada2279c7e6c Mon Sep 17 00:00:00 2001 From: prplV Date: Mon, 31 Mar 2025 10:08:50 -0400 Subject: [PATCH 19/44] versions change --- noxis-cli/Cargo.toml | 2 +- noxis-rs/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/noxis-cli/Cargo.toml b/noxis-cli/Cargo.toml index 4c34412..e02d5f8 100644 --- 
a/noxis-cli/Cargo.toml +++ b/noxis-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noxis-cli" -version = "0.2.4" +version = "0.2.7" edition = "2021" [dependencies] diff --git a/noxis-rs/Cargo.toml b/noxis-rs/Cargo.toml index 86cf9bd..8207b26 100644 --- a/noxis-rs/Cargo.toml +++ b/noxis-rs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noxis-rs" -version = "0.11.10" +version = "0.11.26" edition = "2021" [dependencies] -- 2.40.1 From 584404c05042d1d6d311e9224064c5caf4901ae0 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 10 Apr 2025 08:52:58 -0400 Subject: [PATCH 20/44] preparing for prcsv2 --- noxis-rs/settings.json | 4 +-- noxis-rs/src/main.rs | 20 +++++++++++--- noxis-rs/src/options/structs.rs | 33 +++++++++++++++++++++++ noxis-rs/src/utils.rs | 47 ++++++++++++++++++++++++++++++--- 4 files changed, 94 insertions(+), 10 deletions(-) diff --git a/noxis-rs/settings.json b/noxis-rs/settings.json index 10485c8..bb5f44a 100644 --- a/noxis-rs/settings.json +++ b/noxis-rs/settings.json @@ -12,7 +12,7 @@ "src": "./tests/examples/", "triggers": { "onDelete": "stop", - "onChange": "stay" + "onChange": "restart" } } ], @@ -23,7 +23,7 @@ "triggers": { "wait": 10, "delay": 2, - "onLost": "hold" + "onLost": "restart" } } ] diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index 7341bac..aa9eeb1 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -15,6 +15,7 @@ use utils::*; use options::preboot::PrebootParams; use tokio::sync::{broadcast, oneshot}; use options::config::v2::init_config_mechanism; +use utils::v2::init_monitoring; #[tokio::main(flavor = "multi_thread", worker_threads = 4)] async fn main() -> anyhow::Result<()>{ @@ -23,11 +24,13 @@ async fn main() -> anyhow::Result<()>{ let _ = setup_logger(); info!("Noxis is configurating..."); - - let (tx_brd, mut _rx_brd) = broadcast::channel::(1); - let (_tx_oneshot, rx_oneshot) = oneshot::channel::(); + // + let (tx_brd, mut rx_brd) = broadcast::channel::(1); + // cli <-> config + let (tx_oneshot, rx_oneshot) 
= oneshot::channel::(); let mut handler: Vec> = vec![]; + // initilaizing task for config manipulations let config_module = tokio::spawn(async move { let _ = init_config_mechanism( rx_oneshot, @@ -37,6 +40,7 @@ async fn main() -> anyhow::Result<()>{ }); handler.push(config_module); + // initilaizing task for cli manipulation let cli_module = tokio::spawn(async move { if let Err(er) = init_cli_pipeline().await { error!("CLI pipeline failed due to {}", er) @@ -44,14 +48,22 @@ async fn main() -> anyhow::Result<()>{ }); handler.push(cli_module); + // initilaizing task for deinitializing `Noxis` let ctrlc = tokio::spawn(async move { if let Err(er) = set_valid_destructor(vec![].into()).await { - error!("CTRLC mod failed!"); + error!("Destructor mod failed due to {}", er); } std::process::exit(0); }); handler.push(ctrlc); + let monitoring = tokio::spawn(async move { + if let Err(er) = init_monitoring(&mut rx_brd).await { + error!("Monitoring mod failed due to {}", er); + } + }); + handler.push(monitoring); + for i in handler { let _ = i.await; } diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 18ae5e9..55a5222 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -3,6 +3,39 @@ use std::net::Ipv4Addr; use serde::{Deserialize, Serialize}; +pub enum DependencyType { + File, + Service, +} +pub enum Triggers<'a> { + File{ on_change: &'a str, on_delete: &'a str }, + Service(&'a str), +} + +impl std::fmt::Display for DependencyType { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + return match self { + DependencyType::File => write!(f, "File"), + DependencyType::Service => write!(f, "Service"), + } + } +} +pub enum ProcessState { + Pending, + Holding, + Stopped, + StoppedByCli, +} +pub enum Events<'a> { + Positive(&'a str), + Negative(NegativeOutcomes<'a>) +} +pub enum NegativeOutcomes<'a> { + FileWasChanged(&'a str, DependencyType, &'a str), + FileWasMovedOrDeleted(&'a str, DependencyType, 
&'a str), + ServiceIsUnreachable(&'a str, DependencyType, &'a str), +} + /// # an Error enum (next will be deleted and replaced) pub enum CustomError { Fatal, diff --git a/noxis-rs/src/utils.rs b/noxis-rs/src/utils.rs index 9ead34d..468075c 100644 --- a/noxis-rs/src/utils.rs +++ b/noxis-rs/src/utils.rs @@ -6,12 +6,11 @@ pub mod services; // TODO : saving current flags state -use crate::options::structs::CustomError; -use crate::options::structs::TrackingProcess; +use crate::options::structs::{CustomError, TrackingProcess, Processes}; use files::create_watcher; use files::file_handler; use inotify::Inotify; -use log::{error, warn}; +use log::{error, warn, info}; use prcs::{ freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process, unfreeze_process, @@ -22,9 +21,50 @@ use std::sync::Arc; use tokio::join; use tokio::sync::mpsc; use tokio::time::Duration; +use tokio::sync::broadcast::Receiver; +use tokio::sync::mpsc::{Receiver as MpscReciever, Sender as MpscSender}; const GET_ID_CMD: &str = "hostname"; +pub mod v2 { + use super::*; + + // spawn tasks + // spawn prc + // spawn files + // spawn services + // ## for ... i.await in loop + pub async fn init_monitoring( + local_config: &mut Receiver, + ) -> anyhow::Result<()> { + let config = if !local_config.is_empty() { + local_config.recv().await? 
+ } else { + let mut tick = tokio::time::interval(Duration::from_millis(500)); + loop { + tick.tick().await; + break match local_config.try_recv() { + Ok(conf) => conf, + Err(_) => continue, + } + } + }; + info!("Processing {} processes ...", config.processes.len()); + // LinkedList + // LinkedList + Ok(()) + } + + // spawn prc check with semaphore check + async fn prcs_monitoriing() -> anyhow::Result<()> { Ok(()) } + + // spawn file check with semaphore check + async fn files_monitoriing() -> anyhow::Result<()> { Ok(()) } + + // spawn service check with semaphore check + async fn services_monitoriing() -> anyhow::Result<()> { Ok(()) } +} + /// # Fn `run_daemons` /// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to given messages into channel /// @@ -133,7 +173,6 @@ async fn process_protocol_symbol(proc: Arc, val: u8) -> Result< }, // // 9 - File-dependency change -> staying (after check) 9 => { - // no need to trash logs warn!("File-dependency warning (file changed). 
Ignoring event on {} process...", &proc.name); tokio::time::sleep(Duration::from_millis(100)).await; }, -- 2.40.1 From 2b82fb7aac5320e4d53bda347bf3be39c00734d6 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 10 Apr 2025 08:53:38 -0400 Subject: [PATCH 21/44] prcs v2 controller with impl --- noxis-rs/src/utils/prcs.rs | 94 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index a434c43..726d703 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -3,6 +3,100 @@ use log::{error, warn}; use std::process::{Command, Output}; use std::sync::Arc; use tokio::time::Duration; +use crate::options::structs::{TrackingProcess, ProcessState, Events, NegativeOutcomes}; +use std::collections::HashSet; +use tokio::sync::mpsc::Receiver as MpscReciever; + +pub mod v2 { + use log::info; + use crate::options::structs::DependencyType; + + use super::*; + pub struct ProcessController<'a> { + name: &'a str, + obj: Arc, + state: ProcessState, + event_reader: MpscReciever>, + negative_events: HashSet<&'a str>, + } + + impl<'a> ProcessController<'a> { + pub async fn process(&mut self) { + if let Ok(event) = self.event_reader.try_recv() { + match event { + Events::Positive(target) => { + if self.negative_events.contains(target) { + self.negative_events.remove(target); + } + }, + Events::Negative(event) => { + match event { + NegativeOutcomes::FileWasChanged(target, dep_type, trigger) | + NegativeOutcomes::FileWasMovedOrDeleted(target, dep_type, trigger) | + NegativeOutcomes::ServiceIsUnreachable(target, dep_type, trigger) => { + if !self.negative_events.contains(target) { + self.negative_events.insert(target); + + self.trigger_on( + target, + trigger, + dep_type + ).await; + } + }, + } + }, + } + } + match self.state { + ProcessState::Holding => { + if self.negative_events.len() == 0 { + info!("No negative dependecies events on {} process. 
Unfreezing ...", self.name); + unfreeze_process(self.name).await; + } + }, + ProcessState::Stopped => { + if self.negative_events.len() == 0 { + info!("No negative dependecies events on {} process. Starting ...", self.name); + if let Err(_) = start_process(self.name, &self.obj.path).await { + error!("Cannot start process {} due to {}", self.name, "system unrecognized error"); + } + } + }, + _ => {}, + } + } + + async fn trigger_on(&mut self, dep_name: &str, trigger: &str, dep_type: DependencyType) { + match trigger { + "stay" => { + info!("Event on {} `{}` for {}. Ignoring ...", dep_type, dep_name, self.name); + }, + "stop" => { + if is_active(self.name).await { + info!("Event on {} `{}` for {}. Stopping ...", dep_type, dep_name, self.name); + terminate_process(self.name).await; + self.state = ProcessState::Stopped; + } + }, + "hold" => { + if !is_frozen(self.name).await { + info!("Event on {} `{}` for {}. Freezing ...", dep_type, dep_name, self.name); + freeze_process(self.name).await; + self.state = ProcessState::Holding; + } + }, + "restart" => { + info!("Event on {} `{}` for {}. Restarting ...", dep_type, dep_name, self.name); + let _ = restart_process(self.name, &self.obj.path).await; + }, + _ => error!("Impermissible trigger in file-trigger for {}. Ignoring event ...", self.name), + } + tokio::time::sleep(Duration::from_millis(100)).await; + } + async fn trigger_on_servcie(&mut self, file_name: &str, trigger: &str) {} + } +} /// # Fn `get_pid` /// ## for initializing process of unstoppable grubbing metrics. 
-- 2.40.1 From f504632c4dfc16d2ba8d9302d805fd51ee02a59e Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 10 Apr 2025 08:53:52 -0400 Subject: [PATCH 22/44] files v2 controller without impl --- noxis-rs/src/utils/files.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index 3d0dade..ee5d942 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -5,7 +5,25 @@ use std::borrow::BorrowMut; use std::path::Path; use std::sync::Arc; use tokio::sync::mpsc; +use tokio::sync::mpsc::Sender as MpscSender; use tokio::time::Duration; +use crate::options::structs::Events; + +pub mod v2 { + use std::collections::HashMap; + + use crate::options::structs::Triggers; + + use super::*; + + struct FilesController<'a> { + name: &'a str, + watcher: Inotify, + // obj: Arc, + triggers: HashMap<&'a str, Triggers<'a>>, + event_registrator: Vec>>, + } +} /// # Fn `create_watcher` /// ## for creating watcher on file's delete | update events -- 2.40.1 From 71acb4a32efe1c65f0ecc9048c2243acdc4cbce9 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 10 Apr 2025 08:54:00 -0400 Subject: [PATCH 23/44] services v2 controller without impl --- noxis-rs/src/utils/services.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index fb51f7d..608b914 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -5,6 +5,22 @@ use std::net::{TcpStream, ToSocketAddrs}; use std::sync::Arc; use tokio::sync::mpsc; use tokio::time::{Duration, Instant}; +use tokio::sync::mpsc::Sender as MpscSender; +use crate::options::structs::Events; + +pub mod v2 { + use crate::options::structs::Triggers; + + use super::*; + use std::collections::HashMap; + + struct ServicesController<'a> { + name: &'a str, + obj: Arc, + triggers: HashMap<&'a str, Triggers<'a>>, + event_registrator: Vec>>, + } +} /// # Fn `service_handler` /// ## 
function to realize mechanism of current process' dep services monitoring -- 2.40.1 From c50c444f2171c4460066803f85968e2e88e775e1 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 10 Apr 2025 08:54:12 -0400 Subject: [PATCH 24/44] pub commands (need later) --- noxis-cli/src/cli.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/noxis-cli/src/cli.rs b/noxis-cli/src/cli.rs index b96e85f..5a82b64 100644 --- a/noxis-cli/src/cli.rs +++ b/noxis-cli/src/cli.rs @@ -12,7 +12,7 @@ pub struct Cli { subcommand, help = "to manage Noxis work", )] - command : Commands, + pub command : Commands, } #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] @@ -56,13 +56,13 @@ pub struct StartAction { num_args = 1.., value_delimiter = ' ' )] - flags : Vec, + pub flags : Vec, } #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] pub struct ConfigCommand { #[command(subcommand)] - action : ConfigAction, + pub action : ConfigAction, } #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] @@ -89,12 +89,12 @@ pub struct LocalConfig { action, help = "to read following input as JSON", )] - is_json : bool, + pub is_json : bool, // value #[arg( help = "path to config file or config String (with --json flag)", )] - config : String, + pub config : String, } #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] @@ -102,16 +102,16 @@ pub struct ProcessCommand { #[arg( help = "name of needed process", )] - process : String, + pub process : String, #[command( subcommand, help = "To get current process's status", )] - action : ProcessAction, + pub action : ProcessAction, } #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] -enum ProcessAction { +pub enum ProcessAction { #[command( about = "To get info about current process status", )] -- 2.40.1 From 721fa6c758f1bed4300f9bd17e20609bd737ad02 Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 10 Apr 2025 08:58:40 -0400 Subject: [PATCH 25/44] prc state check logic 
fixed without code repeating --- noxis-rs/src/utils/prcs.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index 726d703..22eca4a 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -48,22 +48,20 @@ pub mod v2 { }, } } - match self.state { - ProcessState::Holding => { - if self.negative_events.len() == 0 { + if self.negative_events.len() == 0 { + match self.state { + ProcessState::Holding => { info!("No negative dependecies events on {} process. Unfreezing ...", self.name); unfreeze_process(self.name).await; - } - }, - ProcessState::Stopped => { - if self.negative_events.len() == 0 { + }, + ProcessState::Stopped => { info!("No negative dependecies events on {} process. Starting ...", self.name); if let Err(_) = start_process(self.name, &self.obj.path).await { error!("Cannot start process {} due to {}", self.name, "system unrecognized error"); } - } - }, - _ => {}, + }, + _ => {}, + } } } -- 2.40.1 From cd7669d942b36e93332f54822164e3dcac4fb553 Mon Sep 17 00:00:00 2001 From: prplV Date: Tue, 15 Apr 2025 10:32:29 -0400 Subject: [PATCH 26/44] controllers impls and trait --- noxis-rs/src/options/structs.rs | 4 ++ noxis-rs/src/utils/files.rs | 46 +++++++++++++++++++---- noxis-rs/src/utils/prcs.rs | 65 +++++++++++++++++---------------- noxis-rs/src/utils/services.rs | 16 ++++++-- 4 files changed, 88 insertions(+), 43 deletions(-) diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 55a5222..282a13d 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -36,6 +36,10 @@ pub enum NegativeOutcomes<'a> { ServiceIsUnreachable(&'a str, DependencyType, &'a str), } +pub trait ProcessUnit<'a> { + fn process(&mut self) -> impl std::future::Future + Send; +} + /// # an Error enum (next will be deleted and replaced) pub enum CustomError { Fatal, diff --git a/noxis-rs/src/utils/files.rs 
b/noxis-rs/src/utils/files.rs index ee5d942..678b084 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -10,18 +10,50 @@ use tokio::time::Duration; use crate::options::structs::Events; pub mod v2 { - use std::collections::HashMap; - - use crate::options::structs::Triggers; - + // use std::collections::HashMap; + use crate::options::structs::{Triggers, ProcessUnit}; use super::*; + use std::path::Path; + + type EventHandlers<'a> = Vec>>; struct FilesController<'a> { name: &'a str, - watcher: Inotify, + path: String, + watcher: Option, // obj: Arc, - triggers: HashMap<&'a str, Triggers<'a>>, - event_registrator: Vec>>, + triggers: Triggers<'a>, + event_registrator: EventHandlers<'a>, + } + + impl<'a> FilesController<'a> { + pub fn new(name: &'a str, triggers: Triggers<'a>, event_registrator: EventHandlers<'a>) -> FilesController<'a> { + Self { + name, + path : String::new(), + watcher: None, + triggers, + event_registrator, + } + } + pub async fn with_path(&mut self, path: impl AsRef) -> anyhow::Result<()> { + self.path = path.as_ref().to_string_lossy().into_owned(); + self.watcher = Some({ + match create_watcher(self.name, &self.path).await { + Ok(val) => val, + Err(er) => return Err(er) + } + }); + Ok(()) + } + pub async fn trigger_on(&mut self) { + // trigger handler + } + } + impl<'a> ProcessUnit<'a> for FilesController<'a> { + async fn process(&mut self) { + // polling file check + } } } diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index 22eca4a..ef88228 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -3,7 +3,7 @@ use log::{error, warn}; use std::process::{Command, Output}; use std::sync::Arc; use tokio::time::Duration; -use crate::options::structs::{TrackingProcess, ProcessState, Events, NegativeOutcomes}; +use crate::options::structs::{TrackingProcess, ProcessState, Events, NegativeOutcomes, ProcessUnit}; use std::collections::HashSet; use tokio::sync::mpsc::Receiver as 
MpscReciever; @@ -21,7 +21,37 @@ pub mod v2 { } impl<'a> ProcessController<'a> { - pub async fn process(&mut self) { + async fn trigger_on(&mut self, dep_name: &str, trigger: &str, dep_type: DependencyType) { + match trigger { + "stay" => { + info!("Event on {} `{}` for {}. Ignoring ...", dep_type, dep_name, self.name); + }, + "stop" => { + if is_active(self.name).await { + info!("Event on {} `{}` for {}. Stopping ...", dep_type, dep_name, self.name); + terminate_process(self.name).await; + self.state = ProcessState::Stopped; + } + }, + "hold" => { + if !is_frozen(self.name).await { + info!("Event on {} `{}` for {}. Freezing ...", dep_type, dep_name, self.name); + freeze_process(self.name).await; + self.state = ProcessState::Holding; + } + }, + "restart" => { + info!("Event on {} `{}` for {}. Restarting ...", dep_type, dep_name, self.name); + let _ = restart_process(self.name, &self.obj.path).await; + }, + _ => error!("Impermissible trigger in file-trigger for {}. Ignoring event ...", self.name), + } + tokio::time::sleep(Duration::from_micros(100)).await; + } + } + + impl<'a> ProcessUnit<'a> for ProcessController<'a> { + async fn process(&mut self) { if let Ok(event) = self.event_reader.try_recv() { match event { Events::Positive(target) => { @@ -64,36 +94,7 @@ pub mod v2 { } } } - - async fn trigger_on(&mut self, dep_name: &str, trigger: &str, dep_type: DependencyType) { - match trigger { - "stay" => { - info!("Event on {} `{}` for {}. Ignoring ...", dep_type, dep_name, self.name); - }, - "stop" => { - if is_active(self.name).await { - info!("Event on {} `{}` for {}. Stopping ...", dep_type, dep_name, self.name); - terminate_process(self.name).await; - self.state = ProcessState::Stopped; - } - }, - "hold" => { - if !is_frozen(self.name).await { - info!("Event on {} `{}` for {}. Freezing ...", dep_type, dep_name, self.name); - freeze_process(self.name).await; - self.state = ProcessState::Holding; - } - }, - "restart" => { - info!("Event on {} `{}` for {}. 
Restarting ...", dep_type, dep_name, self.name); - let _ = restart_process(self.name, &self.obj.path).await; - }, - _ => error!("Impermissible trigger in file-trigger for {}. Ignoring event ...", self.name), - } - tokio::time::sleep(Duration::from_millis(100)).await; - } - async fn trigger_on_servcie(&mut self, file_name: &str, trigger: &str) {} - } + } } /// # Fn `get_pid` diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index 608b914..b90007d 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -9,16 +9,24 @@ use tokio::sync::mpsc::Sender as MpscSender; use crate::options::structs::Events; pub mod v2 { - use crate::options::structs::Triggers; + use crate::options::structs::{Triggers, ProcessUnit}; use super::*; use std::collections::HashMap; + type EventHandlers<'a> = Vec>>; + struct ServicesController<'a> { name: &'a str, - obj: Arc, - triggers: HashMap<&'a str, Triggers<'a>>, - event_registrator: Vec>>, + // obj: Arc, + triggers: Triggers<'a>, + event_registrator: EventHandlers<'a>, + } + // self impl + impl<'a> ProcessUnit<'a> for ServicesController<'a> { + async fn process(&mut self) { + + } } } -- 2.40.1 From e3f07f42a6b6194beee17eb6d49904297cdfaa6f Mon Sep 17 00:00:00 2001 From: prplV Date: Thu, 17 Apr 2025 09:59:33 -0400 Subject: [PATCH 27/44] new structs, controller and processors --- noxis-rs/src/options/structs.rs | 51 ++++++++++++++++++++ noxis-rs/src/utils/files.rs | 84 +++++++++++++++++++++++++++------ noxis-rs/src/utils/prcs.rs | 27 ++++++----- noxis-rs/src/utils/services.rs | 31 ++++++++---- 4 files changed, 157 insertions(+), 36 deletions(-) diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 282a13d..ef098c7 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -7,11 +7,61 @@ pub enum DependencyType { File, Service, } + +pub struct ServiceWaitConfig { wait: u32, delay: u32} + +impl Default for ServiceWaitConfig { + fn 
default() -> Self { + Self { wait: 0, delay: 5 } + } +} + +pub enum FileTriggerType { + OnChange, + OnDelete, +} + +impl std::fmt::Display for FileTriggerType { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + return match self { + FileTriggerType::OnChange => write!(f, "File was changed"), + FileTriggerType::OnDelete => write!(f, "File was moved or deleted"), + } + } +} + +impl<'a> FileTriggerType { + pub fn event(&self, file_name: &'a str, trigger: &'a str) -> Events<'a> { + return match self { + FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger)), + FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger)), + } + } + pub fn event_from_file_trigger_controller(&self, file_name: &'a str, trigger: &FileTriggersForController<'a>) -> Events<'a> { + return match self { + FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_change)), + FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger.on_delete)), + } + } +} + pub enum Triggers<'a> { File{ on_change: &'a str, on_delete: &'a str }, Service(&'a str), } +impl<'a> Triggers<'a> { + pub fn new_file(on_change: &'a str, on_delete: &'a str) -> Triggers<'a> { + Triggers::File { on_change, on_delete } + } + pub fn new_service(on_lost: &'a str) -> Triggers<'a> { + Triggers::Service(on_lost) + } +} + +pub struct FileTriggersForController<'a> { pub on_change: &'a str, pub on_delete: &'a str } +pub struct ServiceTriggersForController<'a>(&'a str); + impl std::fmt::Display for DependencyType { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { return match self { @@ -20,6 +70,7 @@ impl std::fmt::Display for DependencyType { } } } + pub enum ProcessState { Pending, Holding, diff --git a/noxis-rs/src/utils/files.rs 
b/noxis-rs/src/utils/files.rs index 678b084..22060fb 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -10,49 +10,103 @@ use tokio::time::Duration; use crate::options::structs::Events; pub mod v2 { - // use std::collections::HashMap; - use crate::options::structs::{Triggers, ProcessUnit}; - use super::*; - use std::path::Path; + use log::{error, info, warn}; - type EventHandlers<'a> = Vec>>; + // use std::collections::HashMap; + use crate::options::structs::{FileTriggerType, FileTriggersForController as Triggers, ProcessUnit}; + use super::*; + use std::{collections::HashMap, path::Path}; + + // type EventHandlers<'a> = HashMap + type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender>)>; + // + type FileTriggersWrapper<'a> = HashMap<&'a str, Triggers<'a>>; struct FilesController<'a> { name: &'a str, path: String, watcher: Option, // obj: Arc, - triggers: Triggers<'a>, - event_registrator: EventHandlers<'a>, + triggers: EventHandlers<'a>, } impl<'a> FilesController<'a> { - pub fn new(name: &'a str, triggers: Triggers<'a>, event_registrator: EventHandlers<'a>) -> FilesController<'a> { + pub fn new(name: &'a str, triggers: EventHandlers<'a>) -> FilesController<'a> { Self { name, path : String::new(), watcher: None, triggers, - event_registrator, } } pub async fn with_path(&mut self, path: impl AsRef) -> anyhow::Result<()> { self.path = path.as_ref().to_string_lossy().into_owned(); - self.watcher = Some({ + self.watcher = { match create_watcher(self.name, &self.path).await { - Ok(val) => val, - Err(er) => return Err(er) + Ok(val) => Some(val), + Err(er) => { + error!("Cannot create watcher for {} ({}) due to {}", self.name, &self.path, er); + return Err(er) + } } - }); + }; Ok(()) } - pub async fn trigger_on(&mut self) { - // trigger handler + async fn trigger_on(&mut self, trigger_type: Option) { + let _ = self.triggers.iter() + .map(|(_, (triggers, channel))| async { + let _ = channel.send({ + match &trigger_type { + None => 
Events::Positive(self.name), + Some(event) => { + info!("Event on {} ({}) : {}", self.name, &self.path, event); + event.event_from_file_trigger_controller(self.name, triggers) + }, + } + }).await; + }); } } impl<'a> ProcessUnit<'a> for FilesController<'a> { async fn process(&mut self) { // polling file check + // 1) existing check + if let Ok(_) = check_file(self.name, &self.path).await { + match &mut self.watcher { + Some(notify) => { + let mut buffer = [0; 1024]; + if let Ok(mut notif_events) = notify.read_events(&mut buffer) { + if let (recreate_watcher, true) = ( + notif_events.any(|mask| mask.mask == EventMask::DELETE_SELF), + notif_events.any(|mask| mask.mask == EventMask::MODIFY) + ) { + warn!("File {} ({}) was changed", self.name, &self.path); + if recreate_watcher { + self.watcher = match create_watcher(self.name, &self.path).await { + Ok(notifier) => Some(notifier), + Err(er) => { + error!("Failed to recreate watcher for {} ({}) due to {}", + self.name, + &self.path, + er + ); + None + }, + } + } + self.trigger_on(Some(FileTriggerType::OnChange)).await; + } + } + }, + None => { /* DEAD END */}, + } + } else { + warn!("File {} ({}) was not found in determined scope", self.name, &self.path); + self.trigger_on(Some(FileTriggerType::OnDelete)).await; + return; + } + self.trigger_on(None).await; + // 2) change check } } } diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index ef88228..9d9c6c5 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -1,4 +1,3 @@ -use crate::options::structs::CustomError; use log::{error, warn}; use std::process::{Command, Output}; use std::sync::Arc; @@ -52,7 +51,7 @@ pub mod v2 { impl<'a> ProcessUnit<'a> for ProcessController<'a> { async fn process(&mut self) { - if let Ok(event) = self.event_reader.try_recv() { + while let Ok(event) = self.event_reader.try_recv() { match event { Events::Positive(target) => { if self.negative_events.contains(target) { @@ -82,12 +81,18 @@ pub mod v2 { 
match self.state { ProcessState::Holding => { info!("No negative dependecies events on {} process. Unfreezing ...", self.name); - unfreeze_process(self.name).await; + if let Err(er) = unfreeze_process(self.name).await { + error!("Cannot unfreeze process {} due to {}", self.name, er); + } else { + self.state = ProcessState::Pending; + } }, ProcessState::Stopped => { info!("No negative dependecies events on {} process. Starting ...", self.name); if let Err(_) = start_process(self.name, &self.obj.path).await { error!("Cannot start process {} due to {}", self.name, "system unrecognized error"); + } else { + self.state = ProcessState::Pending; } }, _ => {}, @@ -255,14 +260,11 @@ pub async fn freeze_process(name: &str) { /// /// *depends on* : - /// -pub async fn unfreeze_process(name: &str) { +pub async fn unfreeze_process(name: &str) -> anyhow::Result<()> { let _ = Command::new("pkill") .args(["-CONT", name]) - .output() - .unwrap_or_else(|_| { - error!("Failed to unfreeze process"); - std::process::exit(101); - }); + .output()?; + Ok(()) } /// # Fn `restart_process` @@ -278,7 +280,7 @@ pub async fn unfreeze_process(name: &str) { /// /// *depends on* : fn `start_process`, fn `terminate_process` /// -pub async fn restart_process(name: &str, path: &str) -> Result<(), CustomError> { +pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<()> { terminate_process(name).await; tokio::time::sleep(Duration::from_millis(100)).await; start_process(name, path).await @@ -297,7 +299,7 @@ pub async fn restart_process(name: &str, path: &str) -> Result<(), CustomError> /// /// *depends on* : - /// -pub async fn start_process(name: &str, path: &str) -> Result<(), CustomError> { +pub async fn start_process(name: &str, path: &str) -> anyhow::Result<()> { // let runsh = format!("{} {}", "exec", path); let mut command = Command::new(path); // command.arg(path); @@ -308,8 +310,7 @@ pub async fn start_process(name: &str, path: &str) -> Result<(), CustomError> { Ok(()) } 
Err(er) => { - println!("{:?}", er); - Err(CustomError::Fatal) + Err(anyhow::Error::msg(format!("Cannot start process {} due to {}", name, er))) } } } diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index b90007d..2477c6d 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -6,23 +6,38 @@ use std::sync::Arc; use tokio::sync::mpsc; use tokio::time::{Duration, Instant}; use tokio::sync::mpsc::Sender as MpscSender; -use crate::options::structs::Events; pub mod v2 { - use crate::options::structs::{Triggers, ProcessUnit}; + use crate::options::structs::{Triggers, ProcessUnit, Events, ServiceWaitConfig}; use super::*; use std::collections::HashMap; - type EventHandlers<'a> = Vec>>; + // type EventHandlers<'a> = Vec>>; + type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender>)>; struct ServicesController<'a> { - name: &'a str, - // obj: Arc, - triggers: Triggers<'a>, - event_registrator: EventHandlers<'a>, + name : &'a str, + access_url : String, + config: ServiceWaitConfig, + event_registrator : EventHandlers<'a>, + } + impl<'a> ServicesController<'a> { + pub fn new() -> ServicesController<'a> { + ServicesController { + name : "", + access_url : String::new(), + config: ServiceWaitConfig::default(), + event_registrator : EventHandlers::new(), + } + } + pub async fn with_params(&mut self, hostname: &'a str, port: Option<&'a str>, event_registrator: EventHandlers<'a>) -> anyhow::Result<()> { + self.name = hostname; + self.access_url = format!("{}{}", hostname, port.map_or_else(|| "".to_string(), |p| format!(":{}", p))); + self.event_registrator = event_registrator; + Ok(()) + } } - // self impl impl<'a> ProcessUnit<'a> for ServicesController<'a> { async fn process(&mut self) { -- 2.40.1 From 4fc90300fce55e7d79b45bd590912862f7daf09f Mon Sep 17 00:00:00 2001 From: prplV Date: Fri, 18 Apr 2025 08:44:16 -0400 Subject: [PATCH 28/44] file fix --- noxis-rs/src/utils/files.rs | 9 ++++----- 1 file changed, 4 
insertions(+), 5 deletions(-) diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index 22060fb..8ba72f7 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -19,8 +19,6 @@ pub mod v2 { // type EventHandlers<'a> = HashMap type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender>)>; - // - type FileTriggersWrapper<'a> = HashMap<&'a str, Triggers<'a>>; struct FilesController<'a> { name: &'a str, @@ -54,12 +52,12 @@ pub mod v2 { } async fn trigger_on(&mut self, trigger_type: Option) { let _ = self.triggers.iter() - .map(|(_, (triggers, channel))| async { + .map(|(prc_name, (triggers, channel))| async { let _ = channel.send({ match &trigger_type { None => Events::Positive(self.name), Some(event) => { - info!("Event on {} ({}) : {}", self.name, &self.path, event); + info!("Event on file {} ({}) : {}. Notifying `{}` ...", self.name, &self.path, event, *prc_name); event.event_from_file_trigger_controller(self.name, triggers) }, } @@ -94,7 +92,8 @@ pub mod v2 { }, } } - self.trigger_on(Some(FileTriggerType::OnChange)).await; + self.trigger_on(Some(FileTriggerType::OnChange)).await; + return; } } }, -- 2.40.1 From 0d68efd461b704591512559fdc14b352eaefc2cc Mon Sep 17 00:00:00 2001 From: prplV Date: Fri, 18 Apr 2025 08:44:35 -0400 Subject: [PATCH 29/44] service controller --- noxis-rs/src/options/structs.rs | 8 ++++++-- noxis-rs/src/utils/services.rs | 34 +++++++++++++++++++++++++++++---- 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index ef098c7..5951fd8 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -8,11 +8,15 @@ pub enum DependencyType { Service, } -pub struct ServiceWaitConfig { wait: u32, delay: u32} +pub enum ServiceState { + Ok, + Unavailable +} +pub struct ServiceWaitConfig(u32); impl Default for ServiceWaitConfig { fn default() -> Self { - Self { wait: 0, delay: 5 } + Self(5) } } diff --git 
a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index 2477c6d..11141a7 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -8,17 +8,20 @@ use tokio::time::{Duration, Instant}; use tokio::sync::mpsc::Sender as MpscSender; pub mod v2 { - use crate::options::structs::{Triggers, ProcessUnit, Events, ServiceWaitConfig}; + use crate::options::structs::{Triggers, ProcessUnit, Events, ServiceWaitConfig, ServiceState}; use super::*; - use std::collections::HashMap; + use std::collections::{HashMap, BTreeMap, VecDeque}; // type EventHandlers<'a> = Vec>>; - type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender>)>; + type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender>)>; + // type wrapper for service wait queue + type ConnectionQueue<'a> = BTreeMap, MpscSender>)>>; struct ServicesController<'a> { name : &'a str, access_url : String, + state: ServiceState, config: ServiceWaitConfig, event_registrator : EventHandlers<'a>, } @@ -27,6 +30,7 @@ pub mod v2 { ServicesController { name : "", access_url : String::new(), + state : ServiceState::Unavailable, config: ServiceWaitConfig::default(), event_registrator : EventHandlers::new(), } @@ -37,10 +41,32 @@ pub mod v2 { self.event_registrator = event_registrator; Ok(()) } + async fn check_state(&mut self) -> anyhow::Result<()> { + let mut addrs = self.access_url.to_socket_addrs()?; + if !addrs.any(|a| TcpStream::connect_timeout(&a, Duration::new(1, 0)).is_ok()) { + return Err(anyhow::Error::msg(format!("No access to service `{}`", &self.access_url))) + } + Ok(()) + } + async fn trigger_on(&mut self) {} } impl<'a> ProcessUnit<'a> for ServicesController<'a> { async fn process(&mut self) { - + // check_service(hostname, port) + let current_state = self.check_state().await; + match (&self.state, current_state) { + (ServiceState::Unavailable, Ok(_)) => { + warn!("Connection with `{}` service was established. 
Notifying {} process(es)", &self.access_url, self.event_registrator.len()); + // + self.state = ServiceState::Ok; + }, + (ServiceState::Ok, Err(_)) => { + warn!("Unreachable for connection service `{}`. Notifying {} process(es)", &self.access_url, self.event_registrator.len()); + // + self.state = ServiceState::Unavailable; + }, + _ => { /* DEAD END WITH NO INTEREST */ }, + } } } } -- 2.40.1 From 28092d945aa129e36e5431c85d7c8fa3991bd909 Mon Sep 17 00:00:00 2001 From: prplV Date: Tue, 22 Apr 2025 11:18:14 -0400 Subject: [PATCH 30/44] prcs logic fixed --- noxis-rs/src/utils/prcs.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index 9d9c6c5..c190303 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -82,15 +82,15 @@ pub mod v2 { ProcessState::Holding => { info!("No negative dependecies events on {} process. Unfreezing ...", self.name); if let Err(er) = unfreeze_process(self.name).await { - error!("Cannot unfreeze process {} due to {}", self.name, er); + error!("Cannot unfreeze process {} : {}", self.name, er); } else { self.state = ProcessState::Pending; } }, ProcessState::Stopped => { info!("No negative dependecies events on {} process. 
Starting ...", self.name); - if let Err(_) = start_process(self.name, &self.obj.path).await { - error!("Cannot start process {} due to {}", self.name, "system unrecognized error"); + if let Err(er) = start_process(self.name, &self.obj.path).await { + error!("Cannot start process {} : {}", self.name, er); } else { self.state = ProcessState::Pending; } -- 2.40.1 From 502ea114a6a6c449257a183b91f25a2228234314 Mon Sep 17 00:00:00 2001 From: prplV Date: Tue, 22 Apr 2025 11:18:36 -0400 Subject: [PATCH 31/44] debug + casting --- noxis-rs/src/options/structs.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 5951fd8..7d4d81b 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -8,6 +8,7 @@ pub enum DependencyType { Service, } +#[derive(Debug)] pub enum ServiceState { Ok, Unavailable @@ -49,17 +50,24 @@ impl<'a> FileTriggerType { } } +#[derive(Debug)] pub enum Triggers<'a> { File{ on_change: &'a str, on_delete: &'a str }, - Service(&'a str), + Service{on_lost: &'a str, wait: u32}, } impl<'a> Triggers<'a> { pub fn new_file(on_change: &'a str, on_delete: &'a str) -> Triggers<'a> { Triggers::File { on_change, on_delete } } - pub fn new_service(on_lost: &'a str) -> Triggers<'a> { - Triggers::Service(on_lost) + pub fn new_service(on_lost: &'a str, wait_time: u32) -> Triggers<'a> { + Triggers::Service{on_lost, wait: wait_time} + } + pub fn to_service_negative_event(&'a self, service_name: &'a str) -> Option> { + if let Triggers::Service { on_lost, .. 
} = self { + return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable(service_name, DependencyType::Service, &on_lost))) + } + None } } @@ -92,7 +100,7 @@ pub enum NegativeOutcomes<'a> { } pub trait ProcessUnit<'a> { - fn process(&mut self) -> impl std::future::Future + Send; + fn process(&'a mut self) -> impl std::future::Future + Send; } /// # an Error enum (next will be deleted and replaced) -- 2.40.1 From c3fd0dd09f20573092926eaf1b9cb1ca4a899172 Mon Sep 17 00:00:00 2001 From: prplV Date: Tue, 22 Apr 2025 11:22:07 -0400 Subject: [PATCH 32/44] files logic fixed --- noxis-rs/src/utils/files.rs | 546 ++++++++++++++++++------------------ 1 file changed, 276 insertions(+), 270 deletions(-) diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index 8ba72f7..fad24f0 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -1,286 +1,292 @@ -use crate::options::structs::{CustomError, Files}; -use super::prcs::{is_active, is_frozen}; -use inotify::{EventMask, Inotify, WatchMask}; -use std::borrow::BorrowMut; -use std::path::Path; -use std::sync::Arc; -use tokio::sync::mpsc; -use tokio::sync::mpsc::Sender as MpscSender; -use tokio::time::Duration; -use crate::options::structs::Events; + use crate::options::structs::{CustomError, Files}; + use super::prcs::{is_active, is_frozen}; + use inotify::{EventMask, Inotify, WatchMask}; + use std::borrow::BorrowMut; + use std::path::Path; + use std::sync::Arc; + use tokio::sync::mpsc; + use tokio::sync::mpsc::Sender as Sender; + use tokio::time::Duration; + use crate::options::structs::Events; -pub mod v2 { - use log::{error, info, warn}; + pub mod v2 { + use log::{error, info, warn}; - // use std::collections::HashMap; - use crate::options::structs::{FileTriggerType, FileTriggersForController as Triggers, ProcessUnit}; - use super::*; - use std::{collections::HashMap, path::Path}; + // use std::collections::HashMap; + use crate::options::structs::{FileTriggerType, 
FileTriggersForController as Triggers, ProcessUnit}; + use super::*; + use std::{collections::HashMap, path::Path}; - // type EventHandlers<'a> = HashMap - type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender>)>; + type MpscSender<'a> = Arc>>; + // type EventHandlers<'a> = HashMap + type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender<'a>)>; - struct FilesController<'a> { - name: &'a str, - path: String, - watcher: Option, - // obj: Arc, - triggers: EventHandlers<'a>, - } + struct FilesController<'a> { + name : &'a str, + path : String, + watcher : Option, + // obj: Arc, + triggers : EventHandlers<'a>, + code_name : String, + } - impl<'a> FilesController<'a> { - pub fn new(name: &'a str, triggers: EventHandlers<'a>) -> FilesController<'a> { - Self { - name, - path : String::new(), - watcher: None, - triggers, + impl<'a> FilesController<'a> { + pub fn new(name: &'a str, triggers: EventHandlers<'a>) -> FilesController<'a> { + Self { + name, + path : String::new(), + watcher: None, + triggers, + code_name : name.to_string(), + } + } + pub async fn with_path(&mut self, path: impl AsRef) -> anyhow::Result<()> { + self.path = path.as_ref().to_string_lossy().into_owned(); + self.watcher = { + match create_watcher(self.name, &self.path).await { + Ok(val) => Some(val), + Err(er) => { + error!("Cannot create watcher for {} ({}) due to {}", self.name, &self.path, er); + return Err(er) + } + } + }; + self.code_name = format!("{}{}", &self.path, &self.code_name); + Ok(()) + } + async fn trigger_on(&'a mut self, trigger_type: Option) { + let _ = self.triggers.iter() + .map(|(prc_name, (triggers, channel))| async { + let _ = channel.send({ + match &trigger_type { + None => { + Events::Positive(&self.code_name) + }, + Some(event) => { + info!("Event on file {} ({}) : {}. 
Notifying `{}` ...", self.name, &self.path, event, *prc_name); + event.event_from_file_trigger_controller(self.name, triggers) + }, + } + }).await; + }); } } - pub async fn with_path(&mut self, path: impl AsRef) -> anyhow::Result<()> { - self.path = path.as_ref().to_string_lossy().into_owned(); - self.watcher = { - match create_watcher(self.name, &self.path).await { - Ok(val) => Some(val), - Err(er) => { - error!("Cannot create watcher for {} ({}) due to {}", self.name, &self.path, er); - return Err(er) - } - } - }; - Ok(()) - } - async fn trigger_on(&mut self, trigger_type: Option) { - let _ = self.triggers.iter() - .map(|(prc_name, (triggers, channel))| async { - let _ = channel.send({ - match &trigger_type { - None => Events::Positive(self.name), - Some(event) => { - info!("Event on file {} ({}) : {}. Notifying `{}` ...", self.name, &self.path, event, *prc_name); - event.event_from_file_trigger_controller(self.name, triggers) - }, - } - }).await; - }); - } - } - impl<'a> ProcessUnit<'a> for FilesController<'a> { - async fn process(&mut self) { - // polling file check - // 1) existing check - if let Ok(_) = check_file(self.name, &self.path).await { - match &mut self.watcher { - Some(notify) => { - let mut buffer = [0; 1024]; - if let Ok(mut notif_events) = notify.read_events(&mut buffer) { - if let (recreate_watcher, true) = ( - notif_events.any(|mask| mask.mask == EventMask::DELETE_SELF), - notif_events.any(|mask| mask.mask == EventMask::MODIFY) - ) { - warn!("File {} ({}) was changed", self.name, &self.path); - if recreate_watcher { - self.watcher = match create_watcher(self.name, &self.path).await { - Ok(notifier) => Some(notifier), - Err(er) => { - error!("Failed to recreate watcher for {} ({}) due to {}", - self.name, - &self.path, - er - ); - None - }, + impl<'a> ProcessUnit<'a> for FilesController<'a> { + async fn process(&'a mut self) { + // polling file check + // 1) existing check + if let Ok(_) = check_file(self.name, &self.path).await { + match &mut 
self.watcher { + Some(notify) => { + let mut buffer = [0; 1024]; + if let Ok(mut notif_events) = notify.read_events(&mut buffer) { + if let (recreate_watcher, true) = ( + notif_events.any(|mask| mask.mask == EventMask::DELETE_SELF), + notif_events.any(|mask| mask.mask == EventMask::MODIFY) + ) { + warn!("File {} ({}) was changed", self.name, &self.path); + if recreate_watcher { + self.watcher = match create_watcher(self.name, &self.path).await { + Ok(notifier) => Some(notifier), + Err(er) => { + error!("Failed to recreate watcher for {} ({}) due to {}", + self.name, + &self.path, + er + ); + None + }, + } } + self.trigger_on(Some(FileTriggerType::OnChange)).await; + return; + } + } + }, + None => { /* DEAD END */}, + } + } else { + warn!("File {} ({}) was not found in determined scope", self.name, &self.path); + self.trigger_on(Some(FileTriggerType::OnDelete)).await; + return; + } + self.trigger_on(None).await; + // 2) change check + } + } + } + + /// # Fn `create_watcher` + /// ## for creating watcher on file's delete | update events + /// + /// *input* : `&str`, `&str` + /// + /// *output* : `Err` if it cant create file watcher | `Ok(watcher)` on successfull construction + /// + /// *initiator* : fn `file_handler`, fn `utils::run_daemons` + /// + /// *managing* : current file's name: &str, path in local storage to current file: &str + /// + /// *depends on* : - + /// + pub async fn create_watcher(filename: &str, path: &str) -> anyhow::Result { + let src = format!("{}{}", path, filename); + let inotify: Inotify = Inotify::init()?; + inotify.watches().add(&src, WatchMask::ALL_EVENTS)?; + Ok(inotify) + } + + /// # Fn `create_watcher` + /// ## for managing processes by checking dep files' states + /// + /// *input* : `&str`, `&[Files]`, `Arc>`, `Arc>>` + /// + /// *output* : `Err` if something with dep file is wrong | `Ok(())` on successfull dep file check + /// + /// *initiator* : fn `utils::running_handler` + /// + /// *managing* : current process's name: &str, 
list of dep files : `&[Files]`, atomic ref counter on sender main channel for current process `Arc>`, mut list of file watchers`Arc>>` + /// + /// *depends on* : Files + /// + pub async fn file_handler( + name: &str, + files: &[Files], + tx: Arc>, + watchers: Arc>>, + ) -> anyhow::Result<()> { + for (i, file) in files.iter().enumerate() { + // let src = format!("{}{}", file.src, file.filename); + if check_file(&file.filename, &file.src).await.is_err() { + if !is_active(name).await || is_frozen(name).await { + return Err(anyhow::Error::msg("Process is frozen or stopped")); + } + match file.triggers.on_delete.as_str() { + "stay" => { + tx.send(9).await.unwrap(); + continue; + } + "stop" => { + if is_active(name).await { + tx.send(1).await.unwrap(); + } + return Err(anyhow::Error::msg("Process was stopped")); + } + "hold" => { + if is_active(name).await { + tx.send(2).await.unwrap(); + return Err(anyhow::Error::msg("Process was frozen")); + } + } + _ => { + tokio::time::sleep(Duration::from_millis(50)).await; + tx.send(101).await.unwrap(); + return Err(anyhow::Error::msg("Impermissible character or word in file trigger")); + } + } + } else if is_active(name).await && !is_frozen(name).await { + let watchers = watchers.clone(); + // println!("mutex: {:?}", watchers); + let mut buffer = [0; 128]; + let mut mutex_guard = watchers.lock().await; + if let Some(notify) = mutex_guard.get_mut(i) { + let events = notify.read_events(&mut buffer); + // println!("{:?}", events); + if events.is_ok() { + let events: Vec = events + .unwrap() + .map(|mask| mask.mask) + .filter(|mask| { + *mask == EventMask::MODIFY || *mask == EventMask::DELETE_SELF + }) + .collect(); + for event in events { + if let EventMask::DELETE_SELF = event { + // ! warning (DELETE_SELF event) ! + // println!("! 
warning (DELETE_SELF event) !"); + // * watcher recreation after dealing with file recreation mechanism in text editors + let mutex = notify.borrow_mut(); + + // *mutex = create_watcher(&file.filename, &file.src).await.unwrap(); + if let Ok(watcher) = create_watcher(&file.filename, &file.src).await { + *mutex = watcher; + } + } + match file.triggers.on_change.as_str() { + "stop" => { + let _ = tx.send(7).await; + } + "restart" => { + let _ = tx.send(8).await; + } + "stay" => { + let _ = tx.send(9).await; + } + _ => { + let _ = tx.send(101).await; } - self.trigger_on(Some(FileTriggerType::OnChange)).await; - return; } } - }, - None => { /* DEAD END */}, + } } + } + } + tokio::task::yield_now().await; + Ok(()) + } + + /// # Fn `check_file` + /// ## for checking existance of current file + /// + /// *input* : `&str`, `&str` + /// + /// *output* : `Ok(())` if file exists | `Err(_)` if not | panic on fs error + /// + /// *initiator* : fn `file_handler` + /// + /// *managing* : current file's name: `&str` and current file's path in local storage: `&str` + /// + /// *depends on* : network activity + /// + pub async fn check_file(filename: &str, path: &str) -> Result<(), CustomError> { + let arc_name = Arc::new(filename.to_string()); + let arc_path = Arc::new(path.to_string()); + tokio::task::spawn_blocking(move || { + let file_concat = format!("{}{}", arc_path, arc_name); + let path = Path::new(&file_concat); + if path.exists() { + Ok(()) } else { - warn!("File {} ({}) was not found in determined scope", self.name, &self.path); - self.trigger_on(Some(FileTriggerType::OnDelete)).await; - return; + Err(CustomError::Fatal) } - self.trigger_on(None).await; - // 2) change check + }) + .await + .unwrap_or_else(|_| { + panic!("Corrupted while file check process"); + }) + } + + #[cfg(test)] + mod files_unittests { + use super::*; + #[tokio::test] + async fn try_to_create_watcher() { + let res = create_watcher("dep-file", "./tests/examples/").await; + assert!(res.is_ok()); + } + 
#[tokio::test] + async fn try_to_create_invalid_watcher() { + let res = create_watcher("invalid-file", "/path/to/the/no/dir").await; + assert!(res.is_err()); + } + #[tokio::test] + async fn check_existing_file() { + let res = check_file("dep-file", "./tests/examples/").await; + assert!(res.is_ok()); + } + #[tokio::test] + async fn check_non_existing_file() { + let res = check_file("invalid-file", "/path/to/the/no/dir").await; + assert!(res.is_err()); } } -} - -/// # Fn `create_watcher` -/// ## for creating watcher on file's delete | update events -/// -/// *input* : `&str`, `&str` -/// -/// *output* : `Err` if it cant create file watcher | `Ok(watcher)` on successfull construction -/// -/// *initiator* : fn `file_handler`, fn `utils::run_daemons` -/// -/// *managing* : current file's name: &str, path in local storage to current file: &str -/// -/// *depends on* : - -/// -pub async fn create_watcher(filename: &str, path: &str) -> anyhow::Result { - let src = format!("{}{}", path, filename); - let inotify: Inotify = Inotify::init()?; - inotify.watches().add(&src, WatchMask::ALL_EVENTS)?; - Ok(inotify) -} - -/// # Fn `create_watcher` -/// ## for managing processes by checking dep files' states -/// -/// *input* : `&str`, `&[Files]`, `Arc>`, `Arc>>` -/// -/// *output* : `Err` if something with dep file is wrong | `Ok(())` on successfull dep file check -/// -/// *initiator* : fn `utils::running_handler` -/// -/// *managing* : current process's name: &str, list of dep files : `&[Files]`, atomic ref counter on sender main channel for current process `Arc>`, mut list of file watchers`Arc>>` -/// -/// *depends on* : Files -/// -pub async fn file_handler( - name: &str, - files: &[Files], - tx: Arc>, - watchers: Arc>>, -) -> anyhow::Result<()> { - for (i, file) in files.iter().enumerate() { - // let src = format!("{}{}", file.src, file.filename); - if check_file(&file.filename, &file.src).await.is_err() { - if !is_active(name).await || is_frozen(name).await { - return 
Err(anyhow::Error::msg("Process is frozen or stopped")); - } - match file.triggers.on_delete.as_str() { - "stay" => { - tx.send(9).await.unwrap(); - continue; - } - "stop" => { - if is_active(name).await { - tx.send(1).await.unwrap(); - } - return Err(anyhow::Error::msg("Process was stopped")); - } - "hold" => { - if is_active(name).await { - tx.send(2).await.unwrap(); - return Err(anyhow::Error::msg("Process was frozen")); - } - } - _ => { - tokio::time::sleep(Duration::from_millis(50)).await; - tx.send(101).await.unwrap(); - return Err(anyhow::Error::msg("Impermissible character or word in file trigger")); - } - } - } else if is_active(name).await && !is_frozen(name).await { - let watchers = watchers.clone(); - // println!("mutex: {:?}", watchers); - let mut buffer = [0; 128]; - let mut mutex_guard = watchers.lock().await; - if let Some(notify) = mutex_guard.get_mut(i) { - let events = notify.read_events(&mut buffer); - // println!("{:?}", events); - if events.is_ok() { - let events: Vec = events - .unwrap() - .map(|mask| mask.mask) - .filter(|mask| { - *mask == EventMask::MODIFY || *mask == EventMask::DELETE_SELF - }) - .collect(); - for event in events { - if let EventMask::DELETE_SELF = event { - // ! warning (DELETE_SELF event) ! - // println!("! 
warning (DELETE_SELF event) !"); - // * watcher recreation after dealing with file recreation mechanism in text editors - let mutex = notify.borrow_mut(); - - // *mutex = create_watcher(&file.filename, &file.src).await.unwrap(); - if let Ok(watcher) = create_watcher(&file.filename, &file.src).await { - *mutex = watcher; - } - } - match file.triggers.on_change.as_str() { - "stop" => { - let _ = tx.send(7).await; - } - "restart" => { - let _ = tx.send(8).await; - } - "stay" => { - let _ = tx.send(9).await; - } - _ => { - let _ = tx.send(101).await; - } - } - } - } - } - } - } - tokio::task::yield_now().await; - Ok(()) -} - -/// # Fn `check_file` -/// ## for checking existance of current file -/// -/// *input* : `&str`, `&str` -/// -/// *output* : `Ok(())` if file exists | `Err(_)` if not | panic on fs error -/// -/// *initiator* : fn `file_handler` -/// -/// *managing* : current file's name: `&str` and current file's path in local storage: `&str` -/// -/// *depends on* : network activity -/// -pub async fn check_file(filename: &str, path: &str) -> Result<(), CustomError> { - let arc_name = Arc::new(filename.to_string()); - let arc_path = Arc::new(path.to_string()); - tokio::task::spawn_blocking(move || { - let file_concat = format!("{}{}", arc_path, arc_name); - let path = Path::new(&file_concat); - if path.exists() { - Ok(()) - } else { - Err(CustomError::Fatal) - } - }) - .await - .unwrap_or_else(|_| { - panic!("Corrupted while file check process"); - }) -} - -#[cfg(test)] -mod files_unittests { - use super::*; - #[tokio::test] - async fn try_to_create_watcher() { - let res = create_watcher("dep-file", "./tests/examples/").await; - assert!(res.is_ok()); - } - #[tokio::test] - async fn try_to_create_invalid_watcher() { - let res = create_watcher("invalid-file", "/path/to/the/no/dir").await; - assert!(res.is_err()); - } - #[tokio::test] - async fn check_existing_file() { - let res = check_file("dep-file", "./tests/examples/").await; - assert!(res.is_ok()); - } - 
#[tokio::test] - async fn check_non_existing_file() { - let res = check_file("invalid-file", "/path/to/the/no/dir").await; - assert!(res.is_err()); - } -} -- 2.40.1 From 2495fb84cfc68eff3c2fdd970838513f1159dfa0 Mon Sep 17 00:00:00 2001 From: prplV Date: Tue, 22 Apr 2025 11:22:22 -0400 Subject: [PATCH 33/44] services controller fixed (fuh) --- noxis-rs/src/utils/services.rs | 131 ++++++++++++++++++++++++++++----- 1 file changed, 114 insertions(+), 17 deletions(-) diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index 11141a7..9f54db3 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -5,24 +5,33 @@ use std::net::{TcpStream, ToSocketAddrs}; use std::sync::Arc; use tokio::sync::mpsc; use tokio::time::{Duration, Instant}; -use tokio::sync::mpsc::Sender as MpscSender; +use tokio::sync::mpsc::Sender as Sender; pub mod v2 { - use crate::options::structs::{Triggers, ProcessUnit, Events, ServiceWaitConfig, ServiceState}; + use log::info; + + use crate::options::structs::{Triggers, ProcessUnit, Events, ServiceState}; use super::*; use std::collections::{HashMap, BTreeMap, VecDeque}; + type MpscSender<'a> = Arc>>; // type EventHandlers<'a> = Vec>>; - type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender>)>; + type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender<'a>)>; // type wrapper for service wait queue - type ConnectionQueue<'a> = BTreeMap, MpscSender>)>>; + type ConnectionQueue<'a> = BTreeMap>; + #[derive(Debug)] struct ServicesController<'a> { + // i.e. yandex.ru name : &'a str, + // i.e. 
yandex.ru:443 access_url : String, + // "OK" or "Unavailable" state: ServiceState, - config: ServiceWaitConfig, + // btree map with key as max wait time and it's key to hashmap + config: ConnectionQueue<'a>, + // Map of processes with their (trigger and mpsc sender) event_registrator : EventHandlers<'a>, } impl<'a> ServicesController<'a> { @@ -31,40 +40,128 @@ pub mod v2 { name : "", access_url : String::new(), state : ServiceState::Unavailable, - config: ServiceWaitConfig::default(), + config: ConnectionQueue::new(), event_registrator : EventHandlers::new(), } } - pub async fn with_params(&mut self, hostname: &'a str, port: Option<&'a str>, event_registrator: EventHandlers<'a>) -> anyhow::Result<()> { + pub fn with_params( + &mut self, + hostname: &'a str, + port: Option<&'a str>, + conn_queue: ConnectionQueue<'a>, + event_reg: EventHandlers<'a>, + ) -> &mut ServicesController<'a> { self.name = hostname; self.access_url = format!("{}{}", hostname, port.map_or_else(|| "".to_string(), |p| format!(":{}", p))); - self.event_registrator = event_registrator; - Ok(()) + self.config = conn_queue; + self.event_registrator = event_reg; + self } - async fn check_state(&mut self) -> anyhow::Result<()> { + pub fn add_process( + &mut self, + proc_name: &'a str, + trigger: Triggers<'a>, + sender: MpscSender<'a>, + ) { + // queue add + if let Triggers::Service { wait, .. 
} = trigger { + self.config.entry(wait) + .and_modify(|el| el.push_back(proc_name)) + .or_insert({ + let mut temp = VecDeque::new(); + temp.push_back(proc_name); + temp + }); + } + // event add + self.event_registrator.entry(proc_name).or_insert((trigger, sender)); + } + async fn check_state(&self) -> anyhow::Result<()> { let mut addrs = self.access_url.to_socket_addrs()?; if !addrs.any(|a| TcpStream::connect_timeout(&a, Duration::new(1, 0)).is_ok()) { return Err(anyhow::Error::msg(format!("No access to service `{}`", &self.access_url))) } Ok(()) } - async fn trigger_on(&mut self) {} + async fn trigger_on(&'a mut self) { + match self.state { + ServiceState::Ok => { + let _ = self.event_registrator + .iter() + .map(|(_, (_, el))| async { + let _ = el.send(Events::Positive(&self.access_url)).await; + }); + }, + ServiceState::Unavailable => { + // looped check and notifying + self.looped_check().await; + }, + } + } + async fn looped_check(self: &'a mut Self) { + let longest = self.config.last_entry().unwrap(); + let longest = longest.key(); + let mut interapter = tokio::time::interval(tokio::time::Duration::from_secs(1)); + let timer = tokio::time::Instant::now(); + let mut attempt: u32 = 1; + let access_url = Arc::new(self.access_url.clone()); + // let event_registrator = &mut self.event_registrator; + + if let Err(_) = tokio::time::timeout(tokio::time::Duration::from_secs((longest + 1) as u64), async { + // let access_url = access_url.clone(); + loop { + interapter.tick().await; + info!("Trying to connect to {} (attempt: {}) ...", &access_url, attempt); + attempt += 1; + + let state_check_result = self.check_state().await; + + if state_check_result.is_ok() { + info!("Connection to {} is `OK` now", &access_url); + self.state = ServiceState::Ok; + break; + } else { + let now = timer.elapsed(); + let iterator = self.config.iter() + .filter(|(&a, _)| tokio::time::Duration::from_secs(a as u64) <= now) + .flat_map(|(_, a)| a.iter().copied()) + .collect::>(); + + for name 
in iterator { + let sender_opt = self.event_registrator.get(name) + .map(|(trigger, sender)| + (trigger.to_service_negative_event(name), sender) + ); + + if let Some((tr, tx)) = sender_opt { + let _ = tx.send(tr.unwrap()).await; + } else { + error!("Cannot find {} channel sender in {} service", name, &self.access_url) + } + } + } + } + }).await { + info!("Timeout of establishing connection to {}. ", &access_url); + } + } } impl<'a> ProcessUnit<'a> for ServicesController<'a> { - async fn process(&mut self) { + async fn process(&'a mut self) { // check_service(hostname, port) let current_state = self.check_state().await; match (&self.state, current_state) { (ServiceState::Unavailable, Ok(_)) => { - warn!("Unreachable for connection service `{}`. Notifying {} process(es)", &self.access_url, self.event_registrator.len()); - // - self.state = ServiceState::Unavailable; + warn!("Connection with `{}` service was established. Notifying {} process(es) ...", &self.access_url, &self.event_registrator.len()); + self.state = ServiceState::Ok; + self.trigger_on().await; }, (ServiceState::Ok, Err(_)) => { - warn!("Connection with `{}` service was established. Notifying {} process(es)", &self.access_url, self.event_registrator.len()); - // + warn!("Unreachable for connection service `{}`. 
Notifying {} process(es) ...", &self.access_url, &self.event_registrator.len()); self.state = ServiceState::Unavailable; + self.trigger_on().await; }, + (ServiceState::Unavailable, Err(_)) => warn!("Service {} is still unreachable", &self.access_url), _ => { /* DEAD END WITH NO INTEREST */ }, } } -- 2.40.1 From 541b0f52dd197d9c73eb2e06bb5488c31f7eb17e Mon Sep 17 00:00:00 2001 From: prplV Date: Wed, 23 Apr 2025 10:34:07 -0400 Subject: [PATCH 34/44] supervisor work --- noxis-rs/src/utils.rs | 62 ++++++++++++++++++++++++++- noxis-rs/src/utils/files.rs | 12 ++++-- noxis-rs/src/utils/prcs.rs | 77 ++++++++++++++++++++++------------ noxis-rs/src/utils/services.rs | 10 ++++- 4 files changed, 129 insertions(+), 32 deletions(-) diff --git a/noxis-rs/src/utils.rs b/noxis-rs/src/utils.rs index 468075c..3430fac 100644 --- a/noxis-rs/src/utils.rs +++ b/noxis-rs/src/utils.rs @@ -23,12 +23,58 @@ use tokio::sync::mpsc; use tokio::time::Duration; use tokio::sync::broadcast::Receiver; use tokio::sync::mpsc::{Receiver as MpscReciever, Sender as MpscSender}; +// controllers import +use prcs::v2::ProcessesController; +use files::v2::FilesController; +use services::v2::ServicesController; const GET_ID_CMD: &str = "hostname"; pub mod v2 { + use std::collections::{HashMap, LinkedList}; + use crate::options::structs::{Events, FileTriggersForController, ProcessUnit}; + use super::*; + struct Supervisor<'a> { + prcs : LinkedList>, + files : LinkedList>, + services : LinkedList>, + } + + impl<'a> Supervisor<'a> { + pub fn new(config: &'a Processes) -> Supervisor<'a> { + let mut p = LinkedList::new(); + let mut f = LinkedList::new(); + let mut s = LinkedList::new(); + + let _ = config.processes.iter() + .map(|prc| { + let (rx, tx) = mpsc::channel::>(10); + let temp = ProcessesController::new(&prc.name, tx); + if !p.contains(&temp) { + p.push_back(temp); + } + let rx = Arc::new(rx); + // files + let _ = prc.dependencies.files.iter() + .map(|file| async { + let mut hm = HashMap::new(); + let 
triggers = FileTriggersForController { on_change: &file.triggers.on_change, on_delete: &file.triggers.on_delete}; + hm.insert(&prc.name, (triggers, rx.clone())); + let tempfile = FilesController::new(&file.filename, hm).with_path(file.src).await; + }); + // servs + let _ = prc.dependencies.services.iter() + .map(|serv| { + + }); + }); + + Supervisor { prcs: p, files: f, services: s } + } + } + // spawn tasks // spawn prc // spawn files @@ -53,8 +99,22 @@ pub mod v2 { // LinkedList // LinkedList Ok(()) - } + } + + // async fn generate_controllers<'a>(config: Processes) -> (HashSet>, HashSet>, HashSet>) { + // let mut prcs: HashSet> = HashSet::new(); + // let mut files: HashSet> = HashSet::new(); + // let mut services: HashSet> = HashSet::new(); + // for prc in config.processes { + // let (rx, tx) = mpsc::channel::>(10); + // // let new_prc = ProcessesController::new(&prc.name, tx).with_exe(prc.path); + // let mut new_prc = ProcessesController::new("&prc.name", tx).with_exe(prc.path); + // let a = new_prc.process().await; + + // } + // (prcs, files, services) + // } // spawn prc check with semaphore check async fn prcs_monitoriing() -> anyhow::Result<()> { Ok(()) } diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index fad24f0..00d4033 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -21,7 +21,7 @@ // type EventHandlers<'a> = HashMap type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender<'a>)>; - struct FilesController<'a> { + pub struct FilesController<'a> { name : &'a str, path : String, watcher : Option, @@ -30,6 +30,12 @@ code_name : String, } + impl<'a> PartialEq for FilesController<'a> { + fn eq(&self, other: &Self) -> bool { + self.path == other.path && self.name == other.name + } + } + impl<'a> FilesController<'a> { pub fn new(name: &'a str, triggers: EventHandlers<'a>) -> FilesController<'a> { Self { @@ -40,7 +46,7 @@ code_name : name.to_string(), } } - pub async fn with_path(&mut self, path: 
impl AsRef) -> anyhow::Result<()> { + pub async fn with_path(mut self, path: impl AsRef) -> anyhow::Result> { self.path = path.as_ref().to_string_lossy().into_owned(); self.watcher = { match create_watcher(self.name, &self.path).await { @@ -52,7 +58,7 @@ } }; self.code_name = format!("{}{}", &self.path, &self.code_name); - Ok(()) + Ok(self) } async fn trigger_on(&'a mut self, trigger_type: Option) { let _ = self.triggers.iter() diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index c190303..b2a966f 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -2,24 +2,47 @@ use log::{error, warn}; use std::process::{Command, Output}; use std::sync::Arc; use tokio::time::Duration; -use crate::options::structs::{TrackingProcess, ProcessState, Events, NegativeOutcomes, ProcessUnit}; +use crate::options::structs::{ProcessState, Events, NegativeOutcomes, ProcessUnit}; use std::collections::HashSet; use tokio::sync::mpsc::Receiver as MpscReciever; pub mod v2 { use log::info; use crate::options::structs::DependencyType; + use std::path::Path; use super::*; - pub struct ProcessController<'a> { + + pub struct ProcessesController<'a> { name: &'a str, - obj: Arc, + bin: String, + // obj: Arc, state: ProcessState, event_reader: MpscReciever>, negative_events: HashSet<&'a str>, } - impl<'a> ProcessController<'a> { + impl<'a> PartialEq for ProcessesController<'a> { + fn eq(&self, other: &Self) -> bool { + self.bin == other.bin + } + } + + impl<'a> ProcessesController<'a> { + pub fn new(name: &'a str, event_reader: MpscReciever>) -> ProcessesController<'a> { + ProcessesController { + name, + bin: String::new(), + state : ProcessState::Stopped, + event_reader, + negative_events : HashSet::new(), + } + } + pub fn with_exe(mut self, bin: impl AsRef) -> ProcessesController<'a> { + self.bin = bin.as_ref().to_string_lossy().into_owned(); + self + } + async fn trigger_on(&mut self, dep_name: &str, trigger: &str, dep_type: DependencyType) { match trigger 
{ "stay" => { @@ -41,7 +64,7 @@ pub mod v2 { }, "restart" => { info!("Event on {} `{}` for {}. Restarting ...", dep_type, dep_name, self.name); - let _ = restart_process(self.name, &self.obj.path).await; + let _ = restart_process(self.name, &self.bin).await; }, _ => error!("Impermissible trigger in file-trigger for {}. Ignoring event ...", self.name), } @@ -49,8 +72,29 @@ pub mod v2 { } } - impl<'a> ProcessUnit<'a> for ProcessController<'a> { + impl<'a> ProcessUnit<'a> for ProcessesController<'a> { async fn process(&mut self) { + if self.negative_events.len() == 0 { + match self.state { + ProcessState::Holding => { + info!("No negative dependecies events on {} process. Unfreezing ...", self.name); + if let Err(er) = unfreeze_process(self.name).await { + error!("Cannot unfreeze process {} : {}", self.name, er); + } else { + self.state = ProcessState::Pending; + } + }, + ProcessState::Stopped => { + info!("No negative dependecies events on {} process. Starting ...", self.name); + if let Err(er) = start_process(self.name, &self.bin).await { + error!("Cannot start process {} : {}", self.name, er); + } else { + self.state = ProcessState::Pending; + } + }, + _ => {}, + } + } while let Ok(event) = self.event_reader.try_recv() { match event { Events::Positive(target) => { @@ -77,27 +121,6 @@ pub mod v2 { }, } } - if self.negative_events.len() == 0 { - match self.state { - ProcessState::Holding => { - info!("No negative dependecies events on {} process. Unfreezing ...", self.name); - if let Err(er) = unfreeze_process(self.name).await { - error!("Cannot unfreeze process {} : {}", self.name, er); - } else { - self.state = ProcessState::Pending; - } - }, - ProcessState::Stopped => { - info!("No negative dependecies events on {} process. 
Starting ...", self.name); - if let Err(er) = start_process(self.name, &self.obj.path).await { - error!("Cannot start process {} : {}", self.name, er); - } else { - self.state = ProcessState::Pending; - } - }, - _ => {}, - } - } } } } diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index 9f54db3..0dd56f6 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -22,8 +22,9 @@ pub mod v2 { type ConnectionQueue<'a> = BTreeMap>; #[derive(Debug)] - struct ServicesController<'a> { + pub struct ServicesController<'a> { // i.e. yandex.ru + #[allow(unused)] name : &'a str, // i.e. yandex.ru:443 access_url : String, @@ -34,6 +35,13 @@ pub mod v2 { // Map of processes with their (trigger and mpsc sender) event_registrator : EventHandlers<'a>, } + + impl<'a> PartialEq for ServicesController<'a> { + fn eq(&self, other: &Self) -> bool { + self.access_url == other.access_url + } + } + impl<'a> ServicesController<'a> { pub fn new() -> ServicesController<'a> { ServicesController { -- 2.40.1 From 6d56d1e39c38cc9ca17433d68420a680210fe313 Mon Sep 17 00:00:00 2001 From: prplV Date: Fri, 25 Apr 2025 10:56:42 -0400 Subject: [PATCH 35/44] big change but it's still not working in utils.rs --- noxis-rs/Cargo.toml | 2 + noxis-rs/settings.json | 1 - noxis-rs/src/main.rs | 14 +- noxis-rs/src/options/config.rs | 4 +- noxis-rs/src/options/structs.rs | 10 +- noxis-rs/src/utils.rs | 252 +++++++++++++++++++------------- noxis-rs/src/utils/files.rs | 22 ++- noxis-rs/src/utils/prcs.rs | 7 +- noxis-rs/src/utils/services.rs | 214 ++++++++++++++------------- 9 files changed, 308 insertions(+), 218 deletions(-) diff --git a/noxis-rs/Cargo.toml b/noxis-rs/Cargo.toml index 8207b26..8e35c7d 100644 --- a/noxis-rs/Cargo.toml +++ b/noxis-rs/Cargo.toml @@ -18,3 +18,5 @@ sysinfo = "0.32.0" tokio = { version = "1.38.0", features = ["full", "time"] } noxis-cli = { path = "../noxis-cli" } dotenv = "0.15.0" +futures = "0.3.31" +async-trait = "0.1.88" diff --git 
a/noxis-rs/settings.json b/noxis-rs/settings.json index bb5f44a..496fe2a 100644 --- a/noxis-rs/settings.json +++ b/noxis-rs/settings.json @@ -22,7 +22,6 @@ "port": 443, "triggers": { "wait": 10, - "delay": 2, "onLost": "restart" } } diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index aa9eeb1..79e5b8c 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -58,7 +58,19 @@ async fn main() -> anyhow::Result<()>{ handler.push(ctrlc); let monitoring = tokio::spawn(async move { - if let Err(er) = init_monitoring(&mut rx_brd).await { + let config = if !rx_brd.is_empty() { + rx_brd.recv().await? + } else { + let mut tick = tokio::time::interval(Duration::from_millis(500)); + loop { + tick.tick().await; + break match rx_brd.try_recv() { + Ok(conf) => conf, + Err(_) => continue, + } + } + }; + if let Err(er) = init_monitoring(config).await { error!("Monitoring mod failed due to {}", er); } }); diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 9c2e69f..9e21042 100644 --- a/noxis-rs/src/options/config.rs +++ b/noxis-rs/src/options/config.rs @@ -276,7 +276,7 @@ pub mod v2 { // 100% local exists here // create watcher on local config file - match create_watcher("", local_config_path).await { + match create_watcher("", local_config_path) { Ok(mut watcher) => { loop { let mut need_to_export_config = false; @@ -340,7 +340,7 @@ pub mod v2 { // recreation watcher (draining activity buffer mechanism) // if local config file was deleted and recreated // if local config file was modified locally - match create_watcher("", local_config_path).await { + match create_watcher("", local_config_path) { Ok(new) => watcher = new, Err(er) => error!("Cannot create new watcher due to {}", er), } diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 7d4d81b..c253282 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -2,6 +2,7 @@ use std::net::Ipv4Addr; use serde::{Deserialize, 
Serialize}; +use async_trait::async_trait; pub enum DependencyType { File, @@ -71,6 +72,7 @@ impl<'a> Triggers<'a> { } } +#[derive(Debug)] pub struct FileTriggersForController<'a> { pub on_change: &'a str, pub on_delete: &'a str } pub struct ServiceTriggersForController<'a>(&'a str); @@ -83,6 +85,7 @@ impl std::fmt::Display for DependencyType { } } +#[derive(Debug)] pub enum ProcessState { Pending, Holding, @@ -99,10 +102,10 @@ pub enum NegativeOutcomes<'a> { ServiceIsUnreachable(&'a str, DependencyType, &'a str), } +#[async_trait] pub trait ProcessUnit<'a> { - fn process(&'a mut self) -> impl std::future::Future + Send; + async fn process(&'a mut self); } - /// # an Error enum (next will be deleted and replaced) pub enum CustomError { Fatal, @@ -251,7 +254,7 @@ pub struct Files { #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Services { pub hostname: String, - pub port: u32, + pub port: Option, pub triggers: ServiceTriggers, } @@ -275,7 +278,6 @@ pub struct Services { #[derive(Debug, Serialize, Deserialize, Clone)] pub struct ServiceTriggers { pub wait: u32, - pub delay: u32, #[serde(rename = "onLost")] pub on_lost: String, } diff --git a/noxis-rs/src/utils.rs b/noxis-rs/src/utils.rs index 3430fac..3018f69 100644 --- a/noxis-rs/src/utils.rs +++ b/noxis-rs/src/utils.rs @@ -7,35 +7,42 @@ pub mod services; // TODO : saving current flags state use crate::options::structs::{CustomError, TrackingProcess, Processes}; -use files::create_watcher; -use files::file_handler; -use inotify::Inotify; +// use files::create_watcher; +// use files::file_handler; +// use inotify::Inotify; use log::{error, warn, info}; use prcs::{ freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process, unfreeze_process, }; -use services::service_handler; +// use services::service_handler; use std::process::Command; use std::sync::Arc; -use tokio::join; +// use tokio::join; use tokio::sync::mpsc; use tokio::time::Duration; use 
tokio::sync::broadcast::Receiver; -use tokio::sync::mpsc::{Receiver as MpscReciever, Sender as MpscSender}; +// use tokio::sync::mpsc::{Receiver as MpscReciever, Sender as MpscSender}; // controllers import use prcs::v2::ProcessesController; use files::v2::FilesController; use services::v2::ServicesController; +use async_trait::async_trait; const GET_ID_CMD: &str = "hostname"; pub mod v2 { - use std::collections::{HashMap, LinkedList}; - use crate::options::structs::{Events, FileTriggersForController, ProcessUnit}; - + use std::collections::{BTreeMap, HashMap, LinkedList, VecDeque}; + use crate::options::structs::{Events, FileTriggersForController, ProcessUnit, Triggers}; use super::*; + enum ControllerResult<'a> { + Process(Option>), + File(Option>), + Service(Option>), + } + + #[derive(Debug)] struct Supervisor<'a> { prcs : LinkedList>, files : LinkedList>, @@ -43,35 +50,92 @@ pub mod v2 { } impl<'a> Supervisor<'a> { - pub fn new(config: &'a Processes) -> Supervisor<'a> { - let mut p = LinkedList::new(); - let mut f = LinkedList::new(); - let mut s = LinkedList::new(); - + pub fn new() -> Supervisor<'a> { + Supervisor { prcs: LinkedList::new(), files: LinkedList::new(), services: LinkedList::new()} + } + pub async fn with_config(mut self, config: &'a Processes) -> Supervisor<'a> { let _ = config.processes.iter() - .map(|prc| { + .for_each(|prc| { let (rx, tx) = mpsc::channel::>(10); - let temp = ProcessesController::new(&prc.name, tx); - if !p.contains(&temp) { - p.push_back(temp); + let temp = ProcessesController::new(&prc.name, tx).with_exe(&prc.path); + if !self.prcs.contains(&temp) { + self.prcs.push_back(temp); } let rx = Arc::new(rx); - // files + let _ = prc.dependencies.files.iter() - .map(|file| async { + .for_each(|file| { let mut hm = HashMap::new(); let triggers = FileTriggersForController { on_change: &file.triggers.on_change, on_delete: &file.triggers.on_delete}; - hm.insert(&prc.name, (triggers, rx.clone())); - let tempfile = 
FilesController::new(&file.filename, hm).with_path(file.src).await; + hm.insert(prc.name.as_str(), (triggers, rx.clone())); + + let tempfile = FilesController::new(&file.filename.as_str(), hm) + .with_path(&file.src); + + + if let Ok(file) = tempfile { + if let Some(current_file) = self.files.iter_mut().find(|a| &&file == a) { + current_file.add_event(file); + } else { + self.files.push_back(file); + } + } }); + // servs let _ = prc.dependencies.services.iter() - .map(|serv| { - + .for_each(|serv| { + let access_url = ServicesController::get_access_url(&serv.hostname, serv.port.as_ref()); + // preparations + let rx = rx.clone(); + let serv_cont = ServicesController::new().with_access_name( + &serv.hostname, + access_url + ); + // triggers + let triggers = Triggers::new_service(&serv.triggers.on_lost, serv.triggers.wait); + + if let Some(proc) = self.services.iter_mut().find(|a| &&serv_cont == a) { + proc.add_process(&prc.name, triggers, rx); + } else { + // vecdeque for queue + let mut vec: VecDeque<&'a str> = VecDeque::new(); + vec.push_back(&prc.name); + // connection_queue + let mut connection_queue: BTreeMap> = BTreeMap::new(); + connection_queue.insert(serv.triggers.wait, vec); + // event_reg + let mut hm = HashMap::new(); + hm.insert(prc.name.as_str(), (triggers, rx)); + + let serv_cont = serv_cont.with_params(connection_queue, hm); + self.services.push_back(serv_cont); + } }); }); + self + } + pub fn get_stats(&self) -> String { + format!("processes: {}, files: {}, services: {}", self.prcs.len(),self.files.len(), self.services.len()) + } + async fn proccess_prc(&mut self) { - Supervisor { prcs: p, files: f, services: s } + } + } + + #[async_trait] + impl<'a> ProcessUnit<'a> for Supervisor<'a> { + async fn process(&'a mut self) { + info!("Initializing monitoring ..."); + loop { + // let mut tasks: Vec> = vec![]; + // let (mut prc, mut file, mut serv) = (self.prcs.pop_front().unwrap(), self.files.pop_front().unwrap(), self.services.pop_front().unwrap()); + // 
let res = tokio::join!(prc.process(), file.process(), serv.process()); + if let Some(mut val) = self.prcs.pop_front() { + tokio::spawn(async move {val.process().await;}).await; + } + tokio::time::sleep(Duration::from_millis(100)).await; + } } } @@ -81,23 +145,11 @@ pub mod v2 { // spawn services // ## for ... i.await in loop pub async fn init_monitoring( - local_config: &mut Receiver, + config: Processes ) -> anyhow::Result<()> { - let config = if !local_config.is_empty() { - local_config.recv().await? - } else { - let mut tick = tokio::time::interval(Duration::from_millis(500)); - loop { - tick.tick().await; - break match local_config.try_recv() { - Ok(conf) => conf, - Err(_) => continue, - } - } - }; - info!("Processing {} processes ...", config.processes.len()); - // LinkedList - // LinkedList + let mut supervisor = Supervisor::new().with_config(&config).await; + info!("Monitoring: {} ", &supervisor.get_stats()); + supervisor.process().await; Ok(()) } @@ -140,37 +192,37 @@ pub mod v2 { /// /// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process /// -pub async fn run_daemons( - proc: Arc, - tx: Arc>, - rx: &mut mpsc::Receiver, -) { - // creating watchers + ---buffers--- - let mut watchers: Vec = vec![]; - for file in proc.dependencies.files.clone().into_iter() { - if let Ok(watcher) = create_watcher(&file.filename, &file.src).await { - watchers.push(watcher); - } else { - let _ = tx.send(121).await; - } - // watchers.push(create_watcher(&file.filename, &file.src).await.unwrap()); - } - let watchers_clone: Arc>> = - Arc::new(tokio::sync::Mutex::new(watchers)); +// pub async fn run_daemons( +// proc: Arc, +// tx: Arc>, +// rx: &mut mpsc::Receiver, +// ) { +// // creating watchers + ---buffers--- +// let mut watchers: Vec = vec![]; +// for file in proc.dependencies.files.clone().into_iter() { +// if let Ok(watcher) = create_watcher(&file.filename, &file.src).await { +// watchers.push(watcher); +// } else { +// let _ = 
tx.send(121).await; +// } +// // watchers.push(create_watcher(&file.filename, &file.src).await.unwrap()); +// } +// let watchers_clone: Arc>> = +// Arc::new(tokio::sync::Mutex::new(watchers)); - loop { - let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone()); - tokio::select! { - _ = run_hand => continue, - _val = rx.recv() => { - if process_protocol_symbol(proc.clone(), _val.unwrap()).await.is_err() { - return; - } - }, - } - tokio::task::yield_now().await; - } -} +// loop { +// let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone()); +// tokio::select! { +// _ = run_hand => continue, +// _val = rx.recv() => { +// if process_protocol_symbol(proc.clone(), _val.unwrap()).await.is_err() { +// return; +// } +// }, +// } +// tokio::task::yield_now().await; +// } +// } async fn process_protocol_symbol(proc: Arc, val: u8) -> Result<(), CustomError>{ match val { @@ -300,36 +352,36 @@ async fn process_protocol_symbol(proc: Arc, val: u8) -> Result< /// /// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}` /// -pub async fn running_handler( - prc: Arc, - tx: Arc>, - watchers: Arc>>, -) { - // services and files check (once) - let files_check = file_handler( - &prc.name, - &prc.dependencies.files, - tx.clone(), - watchers.clone(), - ); - let services_check = service_handler(&prc.name, &prc.dependencies.services, tx.clone()); +// pub async fn running_handler( +// prc: Arc, +// tx: Arc>, +// watchers: Arc>>, +// ) { +// // services and files check (once) +// let files_check = file_handler( +// &prc.name, +// &prc.dependencies.files, +// tx.clone(), +// watchers.clone(), +// ); +// let services_check = service_handler(&prc.name, &prc.dependencies.services, tx.clone()); - let res = join!(files_check, services_check); - // if inactive -> spawn checks -> active is true - if !is_active(&prc.name).await && res.0.is_ok() && res.1.is_ok() { - 
if start_process(&prc.name, &prc.path).await.is_err() { - tx.send(3).await.unwrap(); - return; - } - } - // if frozen -> spawn checks -> unfreeze is true - else if is_frozen(&prc.name).await && res.0.is_ok() && res.1.is_ok() { - tx.send(10).await.unwrap(); - return; - } - // tokio::time::sleep(Duration::from_millis(100)).await; - tokio::task::yield_now().await; -} +// let res = join!(files_check, services_check); +// // if inactive -> spawn checks -> active is true +// if !is_active(&prc.name).await && res.0.is_ok() && res.1.is_ok() { +// if start_process(&prc.name, &prc.path).await.is_err() { +// tx.send(3).await.unwrap(); +// return; +// } +// } +// // if frozen -> spawn checks -> unfreeze is true +// else if is_frozen(&prc.name).await && res.0.is_ok() && res.1.is_ok() { +// tx.send(10).await.unwrap(); +// return; +// } +// // tokio::time::sleep(Duration::from_millis(100)).await; +// tokio::task::yield_now().await; +// } // todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}' /// # Fn `get_container_id` diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index 00d4033..435813c 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -8,6 +8,7 @@ use tokio::sync::mpsc::Sender as Sender; use tokio::time::Duration; use crate::options::structs::Events; + use async_trait::async_trait; pub mod v2 { use log::{error, info, warn}; @@ -21,6 +22,7 @@ // type EventHandlers<'a> = HashMap type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender<'a>)>; + #[derive(Debug)] pub struct FilesController<'a> { name : &'a str, path : String, @@ -46,10 +48,10 @@ code_name : name.to_string(), } } - pub async fn with_path(mut self, path: impl AsRef) -> anyhow::Result> { + pub fn with_path(mut self, path: impl AsRef) -> anyhow::Result> { self.path = path.as_ref().to_string_lossy().into_owned(); self.watcher = { - match create_watcher(self.name, &self.path).await { + match 
create_watcher(self.name, &self.path) { Ok(val) => Some(val), Err(er) => { error!("Cannot create watcher for {} ({}) due to {}", self.name, &self.path, er); @@ -60,6 +62,11 @@ self.code_name = format!("{}{}", &self.path, &self.code_name); Ok(self) } + pub fn add_event(&mut self, file_controller : FilesController<'a>) { + for (k, v) in file_controller.triggers { + self.triggers.entry(k).or_insert(v); + } + } async fn trigger_on(&'a mut self, trigger_type: Option) { let _ = self.triggers.iter() .map(|(prc_name, (triggers, channel))| async { @@ -77,6 +84,7 @@ }); } } + #[async_trait] impl<'a> ProcessUnit<'a> for FilesController<'a> { async fn process(&'a mut self) { // polling file check @@ -92,7 +100,7 @@ ) { warn!("File {} ({}) was changed", self.name, &self.path); if recreate_watcher { - self.watcher = match create_watcher(self.name, &self.path).await { + self.watcher = match create_watcher(self.name, &self.path) { Ok(notifier) => Some(notifier), Err(er) => { error!("Failed to recreate watcher for {} ({}) due to {}", @@ -135,7 +143,7 @@ /// /// *depends on* : - /// - pub async fn create_watcher(filename: &str, path: &str) -> anyhow::Result { + pub fn create_watcher(filename: &str, path: &str) -> anyhow::Result { let src = format!("{}{}", path, filename); let inotify: Inotify = Inotify::init()?; inotify.watches().add(&src, WatchMask::ALL_EVENTS)?; @@ -214,7 +222,7 @@ let mutex = notify.borrow_mut(); // *mutex = create_watcher(&file.filename, &file.src).await.unwrap(); - if let Ok(watcher) = create_watcher(&file.filename, &file.src).await { + if let Ok(watcher) = create_watcher(&file.filename, &file.src) { *mutex = watcher; } } @@ -277,12 +285,12 @@ use super::*; #[tokio::test] async fn try_to_create_watcher() { - let res = create_watcher("dep-file", "./tests/examples/").await; + let res = create_watcher("dep-file", "./tests/examples/"); assert!(res.is_ok()); } #[tokio::test] async fn try_to_create_invalid_watcher() { - let res = create_watcher("invalid-file", 
"/path/to/the/no/dir").await; + let res = create_watcher("invalid-file", "/path/to/the/no/dir"); assert!(res.is_err()); } #[tokio::test] diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index b2a966f..bb0b94d 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -5,6 +5,7 @@ use tokio::time::Duration; use crate::options::structs::{ProcessState, Events, NegativeOutcomes, ProcessUnit}; use std::collections::HashSet; use tokio::sync::mpsc::Receiver as MpscReciever; +use async_trait::async_trait; pub mod v2 { use log::info; @@ -13,6 +14,7 @@ pub mod v2 { use super::*; + #[derive(Debug)] pub struct ProcessesController<'a> { name: &'a str, bin: String, @@ -70,10 +72,11 @@ pub mod v2 { } tokio::time::sleep(Duration::from_micros(100)).await; } - } + } + #[async_trait] impl<'a> ProcessUnit<'a> for ProcessesController<'a> { - async fn process(&mut self) { + async fn process(&'a mut self) { if self.negative_events.len() == 0 { match self.state { ProcessState::Holding => { diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index 0dd56f6..9cec823 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -6,6 +6,7 @@ use std::sync::Arc; use tokio::sync::mpsc; use tokio::time::{Duration, Instant}; use tokio::sync::mpsc::Sender as Sender; +use async_trait::async_trait; pub mod v2 { use log::info; @@ -52,19 +53,29 @@ pub mod v2 { event_registrator : EventHandlers::new(), } } - pub fn with_params( - &mut self, + pub fn with_access_name( + mut self, hostname: &'a str, - port: Option<&'a str>, + access_url: String, + ) -> ServicesController<'a> { + self.name = hostname; + self.access_url = access_url; + self + } + + pub fn with_params( + mut self, conn_queue: ConnectionQueue<'a>, event_reg: EventHandlers<'a>, - ) -> &mut ServicesController<'a> { - self.name = hostname; - self.access_url = format!("{}{}", hostname, port.map_or_else(|| "".to_string(), |p| format!(":{}", p))); + ) -> 
ServicesController<'a> { self.config = conn_queue; self.event_registrator = event_reg; self } + + pub fn get_access_url(hostname: &'a str, port: Option<&u32>) -> String { + format!("{}{}", hostname, port.map_or_else(|| "".to_string(), |p| format!(":{}", p))) + } pub fn add_process( &mut self, proc_name: &'a str, @@ -154,6 +165,7 @@ pub mod v2 { } } } + #[async_trait] impl<'a> ProcessUnit<'a> for ServicesController<'a> { async fn process(&'a mut self) { // check_service(hostname, port) @@ -189,53 +201,53 @@ pub mod v2 { /// /// *depends on* : fn `check_service`, fn `utils::prcs::is_active`, fn `utils::prcs::is_frozen`, fn `looped_service_connecting` /// -pub async fn service_handler( - name: &str, - services: &Vec, - tx: Arc>, -) -> Result<(), CustomError> { - // println!("service daemon on {}", name); - for serv in services { - if check_service(&serv.hostname, &serv.port).await.is_err() { - if !is_active(name).await || is_frozen(name).await { - return Err(CustomError::Fatal); - } - error!( - "Service {}:{} is unreachable for process {}", - &serv.hostname, &serv.port, &name - ); - match serv.triggers.on_lost.as_str() { - "stay" => { - tx.send(4).await.unwrap(); - continue; - } - "stop" => { - if looped_service_connecting(name, serv).await.is_err() { - tx.send(5).await.unwrap(); - tokio::task::yield_now().await; - return Err(CustomError::Fatal); - } - } - "hold" => { - // if is_frozen(name).await { - // return Err(CustomError::Fatal); - // } - if looped_service_connecting(name, serv).await.is_err() { - tx.send(6).await.unwrap(); - tokio::task::yield_now().await; - return Err(CustomError::Fatal); - } - } - _ => { - tx.send(101).await.unwrap(); - return Err(CustomError::Fatal); - } - } - } - } - tokio::time::sleep(Duration::from_millis(100)).await; - Ok(()) -} +// pub async fn service_handler( +// name: &str, +// services: &Vec, +// tx: Arc>, +// ) -> Result<(), CustomError> { +// // println!("service daemon on {}", name); +// for serv in services { +// if 
check_service(&serv.hostname, &serv.port).await.is_err() { +// if !is_active(name).await || is_frozen(name).await { +// return Err(CustomError::Fatal); +// } +// error!( +// "Service {}:{} is unreachable for process {}", +// &serv.hostname, &serv.port, &name +// ); +// match serv.triggers.on_lost.as_str() { +// "stay" => { +// tx.send(4).await.unwrap(); +// continue; +// } +// "stop" => { +// if looped_service_connecting(name, serv).await.is_err() { +// tx.send(5).await.unwrap(); +// tokio::task::yield_now().await; +// return Err(CustomError::Fatal); +// } +// } +// "hold" => { +// // if is_frozen(name).await { +// // return Err(CustomError::Fatal); +// // } +// if looped_service_connecting(name, serv).await.is_err() { +// tx.send(6).await.unwrap(); +// tokio::task::yield_now().await; +// return Err(CustomError::Fatal); +// } +// } +// _ => { +// tx.send(101).await.unwrap(); +// return Err(CustomError::Fatal); +// } +// } +// } +// } +// tokio::time::sleep(Duration::from_millis(100)).await; +// Ok(()) +// } /// # Fn `looped_service_connecting` /// ## for service's state check in loop (with delay and restriction of attempts) @@ -250,54 +262,54 @@ pub async fn service_handler( /// /// *depends on* : fn `check_service` /// -async fn looped_service_connecting(name: &str, serv: &Services) -> Result<(), CustomError> { - if serv.triggers.wait == 0 { - loop { - tokio::time::sleep(Duration::from_secs(serv.triggers.delay.into())).await; - warn!( - "Attempting to connect from {} process to {}:{}", - &name, &serv.hostname, &serv.port - ); - match check_service(&serv.hostname, &serv.port).await { - Ok(_) => { - log::info!( - "Successfully connected to {} from {} process!", - &serv.hostname, - &name - ); - break; - } - Err(_) => { - tokio::task::yield_now().await; - } - } - } - Ok(()) - } else { - let start = Instant::now(); - while start.elapsed().as_secs() < serv.triggers.wait.into() { - tokio::time::sleep(Duration::from_secs(serv.triggers.delay.into())).await; - warn!( - 
"Attempting to connect from {} process to {}:{}", - &name, &serv.hostname, &serv.port - ); - match check_service(&serv.hostname, &serv.port).await { - Ok(_) => { - log::info!( - "Successfully connected to {} from {} process!", - &serv.hostname, - &name - ); - return Ok(()); - } - Err(_) => { - tokio::task::yield_now().await; - } - } - } - Err(CustomError::Fatal) - } -} +// async fn looped_service_connecting(name: &str, serv: &Services) -> Result<(), CustomError> { +// if serv.triggers.wait == 0 { +// loop { +// tokio::time::sleep(Duration::from_secs(serv.triggers.delay.into())).await; +// warn!( +// "Attempting to connect from {} process to {}:{}", +// &name, &serv.hostname, &serv.port +// ); +// match check_service(&serv.hostname, &serv.port).await { +// Ok(_) => { +// log::info!( +// "Successfully connected to {} from {} process!", +// &serv.hostname, +// &name +// ); +// break; +// } +// Err(_) => { +// tokio::task::yield_now().await; +// } +// } +// } +// Ok(()) +// } else { +// let start = Instant::now(); +// while start.elapsed().as_secs() < serv.triggers.wait.into() { +// tokio::time::sleep(Duration::from_secs(serv.triggers.delay.into())).await; +// warn!( +// "Attempting to connect from {} process to {}:{}", +// &name, &serv.hostname, &serv.port +// ); +// match check_service(&serv.hostname, &serv.port).await { +// Ok(_) => { +// log::info!( +// "Successfully connected to {} from {} process!", +// &serv.hostname, +// &name +// ); +// return Ok(()); +// } +// Err(_) => { +// tokio::task::yield_now().await; +// } +// } +// } +// Err(CustomError::Fatal) +// } +// } /// # Fn `check_service` /// ## for check current service's availiability -- 2.40.1 From 052448a7b9de75da26a5f289c7b3b1974d58a83b Mon Sep 17 00:00:00 2001 From: prplV Date: Sun, 4 May 2025 08:59:25 -0400 Subject: [PATCH 36/44] OWNERSHIP FIX: structs with Arc (instead of &'_ str) --- noxis-rs/src/options/structs.rs | 47 +++++++++++++++++---------------- 1 file changed, 24 insertions(+), 23 
deletions(-) diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index c253282..413050e 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -3,6 +3,7 @@ use std::net::Ipv4Addr; use serde::{Deserialize, Serialize}; use async_trait::async_trait; +use std::sync::Arc; pub enum DependencyType { File, @@ -37,44 +38,44 @@ impl std::fmt::Display for FileTriggerType { } impl<'a> FileTriggerType { - pub fn event(&self, file_name: &'a str, trigger: &'a str) -> Events<'a> { + pub fn event(&self, file_name: Arc, trigger: Arc) -> Events { return match self { FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger)), FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger)), } } - pub fn event_from_file_trigger_controller(&self, file_name: &'a str, trigger: &FileTriggersForController<'a>) -> Events<'a> { + pub fn event_from_file_trigger_controller(&self, file_name: Arc, trigger: &FileTriggersForController) -> Events { return match self { - FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_change)), - FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_delete)), + FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_change.clone())), + FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_delete.clone())), } } } #[derive(Debug)] -pub enum Triggers<'a> { - File{ on_change: &'a str, on_delete: &'a str }, - Service{on_lost: &'a str, wait: u32}, +pub enum Triggers { + File { on_change: Arc, on_delete: Arc }, + Service {on_lost: Arc, wait: u32}, } -impl<'a> Triggers<'a> { - pub fn new_file(on_change: &'a str, on_delete: 
&'a str) -> Triggers<'a> { +impl Triggers { + pub fn new_file(on_change: Arc, on_delete: Arc) -> Triggers { Triggers::File { on_change, on_delete } } - pub fn new_service(on_lost: &'a str, wait_time: u32) -> Triggers<'a> { + pub fn new_service(on_lost: Arc, wait_time: u32) -> Triggers { Triggers::Service{on_lost, wait: wait_time} } - pub fn to_service_negative_event(&'a self, service_name: &'a str) -> Option> { + pub fn to_service_negative_event(&self, service_name: Arc) -> Option { if let Triggers::Service { on_lost, .. } = self { - return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable(service_name, DependencyType::Service, &on_lost))) + return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable(service_name, DependencyType::Service, on_lost.clone()))) } None } } #[derive(Debug)] -pub struct FileTriggersForController<'a> { pub on_change: &'a str, pub on_delete: &'a str } -pub struct ServiceTriggersForController<'a>(&'a str); +pub struct FileTriggersForController{ pub on_change: Arc, pub on_delete: Arc } +pub struct ServiceTriggersForController(Arc); impl std::fmt::Display for DependencyType { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -92,19 +93,19 @@ pub enum ProcessState { Stopped, StoppedByCli, } -pub enum Events<'a> { - Positive(&'a str), - Negative(NegativeOutcomes<'a>) +pub enum Events { + Positive(Arc), + Negative(NegativeOutcomes) } -pub enum NegativeOutcomes<'a> { - FileWasChanged(&'a str, DependencyType, &'a str), - FileWasMovedOrDeleted(&'a str, DependencyType, &'a str), - ServiceIsUnreachable(&'a str, DependencyType, &'a str), +pub enum NegativeOutcomes { + FileWasChanged(Arc, DependencyType, Arc), + FileWasMovedOrDeleted(Arc, DependencyType, Arc), + ServiceIsUnreachable(Arc, DependencyType, Arc), } #[async_trait] -pub trait ProcessUnit<'a> { - async fn process(&'a mut self); +pub trait ProcessUnit { + async fn process(&mut self); } /// # an Error enum (next will be deleted and replaced) pub enum 
CustomError { -- 2.40.1 From 3c22a67052507031207b5b3b57fcf0138c4188bf Mon Sep 17 00:00:00 2001 From: prplV Date: Sun, 4 May 2025 08:59:55 -0400 Subject: [PATCH 37/44] OWNERSHIP FIX: services with ownership using Arc --- noxis-rs/src/utils/services.rs | 82 +++++++++++++++++----------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index 9cec823..0b048e5 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -3,8 +3,7 @@ use super::prcs::{is_active, is_frozen}; use log::{error, warn}; use std::net::{TcpStream, ToSocketAddrs}; use std::sync::Arc; -use tokio::sync::mpsc; -use tokio::time::{Duration, Instant}; +use tokio::time::Duration; use tokio::sync::mpsc::Sender as Sender; use async_trait::async_trait; @@ -16,38 +15,38 @@ pub mod v2 { use super::*; use std::collections::{HashMap, BTreeMap, VecDeque}; - type MpscSender<'a> = Arc>>; + type MpscSender = Arc>; // type EventHandlers<'a> = Vec>>; - type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender<'a>)>; + type EventHandlers = HashMap; // type wrapper for service wait queue - type ConnectionQueue<'a> = BTreeMap>; + type ConnectionQueue = BTreeMap>>; #[derive(Debug)] - pub struct ServicesController<'a> { + pub struct ServicesController { // i.e. yandex.ru #[allow(unused)] - name : &'a str, + name : String, // i.e. 
yandex.ru:443 - access_url : String, + access_url : Arc, // "OK" or "Unavailable" state: ServiceState, // btree map with key as max wait time and it's key to hashmap - config: ConnectionQueue<'a>, + config: ConnectionQueue, // Map of processes with their (trigger and mpsc sender) - event_registrator : EventHandlers<'a>, + event_registrator : EventHandlers, } - impl<'a> PartialEq for ServicesController<'a> { + impl PartialEq for ServicesController { fn eq(&self, other: &Self) -> bool { self.access_url == other.access_url } } - impl<'a> ServicesController<'a> { - pub fn new() -> ServicesController<'a> { + impl ServicesController { + pub fn new() -> ServicesController { ServicesController { - name : "", - access_url : String::new(), + name : String::new(), + access_url : Arc::from(String::new()), state : ServiceState::Unavailable, config: ConnectionQueue::new(), event_registrator : EventHandlers::new(), @@ -55,45 +54,46 @@ pub mod v2 { } pub fn with_access_name( mut self, - hostname: &'a str, - access_url: String, - ) -> ServicesController<'a> { - self.name = hostname; - self.access_url = access_url; + hostname: &str, + access_url: &str, + ) -> ServicesController { + self.name = hostname.to_string(); + self.access_url = Arc::from(access_url); self } pub fn with_params( mut self, - conn_queue: ConnectionQueue<'a>, - event_reg: EventHandlers<'a>, - ) -> ServicesController<'a> { + conn_queue: ConnectionQueue, + event_reg: EventHandlers, + ) -> ServicesController { self.config = conn_queue; self.event_registrator = event_reg; self } - pub fn get_access_url(hostname: &'a str, port: Option<&u32>) -> String { + pub fn get_access_url(hostname: &str, port: Option<&u32>) -> String { format!("{}{}", hostname, port.map_or_else(|| "".to_string(), |p| format!(":{}", p))) } pub fn add_process( &mut self, - proc_name: &'a str, - trigger: Triggers<'a>, - sender: MpscSender<'a>, + proc_name: &str, + trigger: Triggers, + sender: MpscSender, ) { + let proc_name: Arc = 
Arc::from(proc_name); // queue add if let Triggers::Service { wait, .. } = trigger { self.config.entry(wait) - .and_modify(|el| el.push_back(proc_name)) + .and_modify(|el| el.push_back(proc_name.clone())) .or_insert({ let mut temp = VecDeque::new(); - temp.push_back(proc_name); + temp.push_back(proc_name.clone()); temp }); } // event add - self.event_registrator.entry(proc_name).or_insert((trigger, sender)); + self.event_registrator.entry(proc_name.to_string()).or_insert((trigger, sender)); } async fn check_state(&self) -> anyhow::Result<()> { let mut addrs = self.access_url.to_socket_addrs()?; @@ -102,13 +102,13 @@ pub mod v2 { } Ok(()) } - async fn trigger_on(&'a mut self) { + async fn trigger_on(&mut self) { match self.state { ServiceState::Ok => { let _ = self.event_registrator .iter() .map(|(_, (_, el))| async { - let _ = el.send(Events::Positive(&self.access_url)).await; + let _ = el.send(Events::Positive(self.access_url.clone())).await; }); }, ServiceState::Unavailable => { @@ -117,7 +117,7 @@ pub mod v2 { }, } } - async fn looped_check(self: &'a mut Self) { + async fn looped_check(self: &mut Self) { let longest = self.config.last_entry().unwrap(); let longest = longest.key(); let mut interapter = tokio::time::interval(tokio::time::Duration::from_secs(1)); @@ -143,19 +143,21 @@ pub mod v2 { let now = timer.elapsed(); let iterator = self.config.iter() .filter(|(&a, _)| tokio::time::Duration::from_secs(a as u64) <= now) - .flat_map(|(_, a)| a.iter().copied()) - .collect::>(); + .flat_map(|(_, a)| a.iter().cloned()) + .collect::>>(); for name in iterator { - let sender_opt = self.event_registrator.get(name) + let proc_name = name.to_string(); + info!("Trying to notify process `{}` ...", &proc_name); + let sender_opt = self.event_registrator.get(&proc_name) .map(|(trigger, sender)| - (trigger.to_service_negative_event(name), sender) + (trigger.to_service_negative_event(name.clone()), sender) ); if let Some((tr, tx)) = sender_opt { let _ = 
tx.send(tr.unwrap()).await; } else { - error!("Cannot find {} channel sender in {} service", name, &self.access_url) + error!("Cannot find {} channel sender in {} service", name.clone(), &self.access_url) } } } @@ -166,8 +168,8 @@ pub mod v2 { } } #[async_trait] - impl<'a> ProcessUnit<'a> for ServicesController<'a> { - async fn process(&'a mut self) { + impl ProcessUnit for ServicesController { + async fn process(&mut self) { // check_service(hostname, port) let current_state = self.check_state().await; match (&self.state, current_state) { -- 2.40.1 From 09c1baed8e7771798f47f698a4235464387f47dd Mon Sep 17 00:00:00 2001 From: prplV Date: Sun, 4 May 2025 09:14:40 -0400 Subject: [PATCH 38/44] structs bug fixed --- noxis-rs/src/options/structs.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 413050e..31c0cb6 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -41,13 +41,13 @@ impl<'a> FileTriggerType { pub fn event(&self, file_name: Arc, trigger: Arc) -> Events { return match self { FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger)), - FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger)), + FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger)), } } pub fn event_from_file_trigger_controller(&self, file_name: Arc, trigger: &FileTriggersForController) -> Events { return match self { FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_change.clone())), - FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_delete.clone())), + FileTriggerType::OnDelete => 
Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger.on_delete.clone())), } } } -- 2.40.1 From 34979a035da3c60605a2e00ea362bb13d1671bf5 Mon Sep 17 00:00:00 2001 From: prplV Date: Sun, 4 May 2025 09:15:03 -0400 Subject: [PATCH 39/44] OWNERSHIP FIX: files with Arc --- noxis-rs/src/utils/files.rs | 51 +++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 27 deletions(-) diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index 435813c..d286c8f 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -12,46 +12,43 @@ pub mod v2 { use log::{error, info, warn}; - - // use std::collections::HashMap; use crate::options::structs::{FileTriggerType, FileTriggersForController as Triggers, ProcessUnit}; use super::*; use std::{collections::HashMap, path::Path}; - type MpscSender<'a> = Arc>>; - // type EventHandlers<'a> = HashMap - type EventHandlers<'a> = HashMap<&'a str, (Triggers<'a>, MpscSender<'a>)>; + type MpscSender = Arc>; + type EventHandlers = HashMap, (Triggers, MpscSender)>; #[derive(Debug)] - pub struct FilesController<'a> { - name : &'a str, + pub struct FilesController { + name : Arc, path : String, + code_name : Arc, watcher : Option, - // obj: Arc, - triggers : EventHandlers<'a>, - code_name : String, + triggers : EventHandlers, } - impl<'a> PartialEq for FilesController<'a> { + impl PartialEq for FilesController { fn eq(&self, other: &Self) -> bool { - self.path == other.path && self.name == other.name + self.code_name == other.code_name } } - impl<'a> FilesController<'a> { - pub fn new(name: &'a str, triggers: EventHandlers<'a>) -> FilesController<'a> { + impl FilesController { + pub fn new(name: &str, triggers: EventHandlers) -> FilesController { + let name: Arc = Arc::from(name); Self { - name, + name: name.clone(), path : String::new(), watcher: None, triggers, - code_name : name.to_string(), + code_name : name.clone(), } } - pub fn with_path(mut self, 
path: impl AsRef) -> anyhow::Result> { + pub fn with_path(mut self, path: impl AsRef) -> anyhow::Result { self.path = path.as_ref().to_string_lossy().into_owned(); self.watcher = { - match create_watcher(self.name, &self.path) { + match create_watcher(&self.name, &self.path) { Ok(val) => Some(val), Err(er) => { error!("Cannot create watcher for {} ({}) due to {}", self.name, &self.path, er); @@ -59,25 +56,25 @@ } } }; - self.code_name = format!("{}{}", &self.path, &self.code_name); + self.code_name = Arc::from(format!("{}{}", &self.path, &self.code_name)); Ok(self) } - pub fn add_event(&mut self, file_controller : FilesController<'a>) { + pub fn add_event(&mut self, file_controller : FilesController) { for (k, v) in file_controller.triggers { self.triggers.entry(k).or_insert(v); } } - async fn trigger_on(&'a mut self, trigger_type: Option) { + async fn trigger_on(&mut self, trigger_type: Option) { let _ = self.triggers.iter() .map(|(prc_name, (triggers, channel))| async { let _ = channel.send({ match &trigger_type { None => { - Events::Positive(&self.code_name) + Events::Positive(self.code_name.clone()) }, Some(event) => { info!("Event on file {} ({}) : {}. 
Notifying `{}` ...", self.name, &self.path, event, *prc_name); - event.event_from_file_trigger_controller(self.name, triggers) + event.event_from_file_trigger_controller(self.code_name.clone(), triggers) }, } }).await; @@ -85,11 +82,11 @@ } } #[async_trait] - impl<'a> ProcessUnit<'a> for FilesController<'a> { - async fn process(&'a mut self) { + impl ProcessUnit for FilesController { + async fn process(&mut self) { // polling file check // 1) existing check - if let Ok(_) = check_file(self.name, &self.path).await { + if let Ok(_) = check_file(&self.name, &self.path).await { match &mut self.watcher { Some(notify) => { let mut buffer = [0; 1024]; @@ -100,7 +97,7 @@ ) { warn!("File {} ({}) was changed", self.name, &self.path); if recreate_watcher { - self.watcher = match create_watcher(self.name, &self.path) { + self.watcher = match create_watcher(&self.name, &self.path) { Ok(notifier) => Some(notifier), Err(er) => { error!("Failed to recreate watcher for {} ({}) due to {}", -- 2.40.1 From 281841b68a2ed5be2accbdb1735562306ad5561c Mon Sep 17 00:00:00 2001 From: prplV Date: Sun, 4 May 2025 09:19:50 -0400 Subject: [PATCH 40/44] OWNERSHIP FIX : prcs with Arc (instead of &'_ str) --- noxis-rs/src/utils/prcs.rs | 50 +++++++++++++++++----------------- noxis-rs/src/utils/services.rs | 3 +- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index bb0b94d..3f10903 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -15,32 +15,32 @@ pub mod v2 { use super::*; #[derive(Debug)] - pub struct ProcessesController<'a> { - name: &'a str, + pub struct ProcessesController { + name: Arc, bin: String, // obj: Arc, state: ProcessState, - event_reader: MpscReciever>, - negative_events: HashSet<&'a str>, + event_reader: MpscReciever, + negative_events: HashSet>, } - impl<'a> PartialEq for ProcessesController<'a> { + impl PartialEq for ProcessesController { fn eq(&self, other: &Self) -> bool { self.bin 
== other.bin } } - impl<'a> ProcessesController<'a> { - pub fn new(name: &'a str, event_reader: MpscReciever>) -> ProcessesController<'a> { + impl ProcessesController { + pub fn new(name: &str, event_reader: MpscReciever) -> ProcessesController { ProcessesController { - name, - bin: String::new(), + name : Arc::from(name), + bin : String::new(), state : ProcessState::Stopped, event_reader, negative_events : HashSet::new(), } } - pub fn with_exe(mut self, bin: impl AsRef) -> ProcessesController<'a> { + pub fn with_exe(mut self, bin: impl AsRef) -> ProcessesController { self.bin = bin.as_ref().to_string_lossy().into_owned(); self } @@ -51,22 +51,22 @@ pub mod v2 { info!("Event on {} `{}` for {}. Ignoring ...", dep_type, dep_name, self.name); }, "stop" => { - if is_active(self.name).await { + if is_active(&self.name).await { info!("Event on {} `{}` for {}. Stopping ...", dep_type, dep_name, self.name); - terminate_process(self.name).await; + terminate_process(&self.name).await; self.state = ProcessState::Stopped; } }, "hold" => { - if !is_frozen(self.name).await { + if !is_frozen(&self.name).await { info!("Event on {} `{}` for {}. Freezing ...", dep_type, dep_name, self.name); - freeze_process(self.name).await; + freeze_process(&self.name).await; self.state = ProcessState::Holding; } }, "restart" => { info!("Event on {} `{}` for {}. Restarting ...", dep_type, dep_name, self.name); - let _ = restart_process(self.name, &self.bin).await; + let _ = restart_process(&self.name, &self.bin).await; }, _ => error!("Impermissible trigger in file-trigger for {}. Ignoring event ...", self.name), } @@ -75,13 +75,13 @@ pub mod v2 { } #[async_trait] - impl<'a> ProcessUnit<'a> for ProcessesController<'a> { - async fn process(&'a mut self) { + impl ProcessUnit for ProcessesController { + async fn process(&mut self) { if self.negative_events.len() == 0 { match self.state { ProcessState::Holding => { info!("No negative dependecies events on {} process. 
Unfreezing ...", self.name); - if let Err(er) = unfreeze_process(self.name).await { + if let Err(er) = unfreeze_process(&self.name).await { error!("Cannot unfreeze process {} : {}", self.name, er); } else { self.state = ProcessState::Pending; @@ -89,7 +89,7 @@ pub mod v2 { }, ProcessState::Stopped => { info!("No negative dependecies events on {} process. Starting ...", self.name); - if let Err(er) = start_process(self.name, &self.bin).await { + if let Err(er) = start_process(&self.name, &self.bin).await { error!("Cannot start process {} : {}", self.name, er); } else { self.state = ProcessState::Pending; @@ -101,8 +101,8 @@ pub mod v2 { while let Ok(event) = self.event_reader.try_recv() { match event { Events::Positive(target) => { - if self.negative_events.contains(target) { - self.negative_events.remove(target); + if self.negative_events.contains(&target) { + self.negative_events.remove(&target); } }, Events::Negative(event) => { @@ -110,12 +110,12 @@ pub mod v2 { NegativeOutcomes::FileWasChanged(target, dep_type, trigger) | NegativeOutcomes::FileWasMovedOrDeleted(target, dep_type, trigger) | NegativeOutcomes::ServiceIsUnreachable(target, dep_type, trigger) => { - if !self.negative_events.contains(target) { - self.negative_events.insert(target); + if !self.negative_events.contains(&target) { + self.negative_events.insert(target.clone()); self.trigger_on( - target, - trigger, + &target, + &trigger, dep_type ).await; } diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index 0b048e5..f5d8ff1 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -1,5 +1,4 @@ -use crate::options::structs::{CustomError, Services}; -use super::prcs::{is_active, is_frozen}; +use crate::options::structs::CustomError; use log::{error, warn}; use std::net::{TcpStream, ToSocketAddrs}; use std::sync::Arc; -- 2.40.1 From a16eb78b79e77a7de34dc26713127e01b3cdd2a7 Mon Sep 17 00:00:00 2001 From: prplV Date: Sun, 4 May 2025 09:31:42 -0400 
Subject: [PATCH 41/44] key: String -> Arc --- noxis-rs/src/utils/services.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index f5d8ff1..a381cb1 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -16,7 +16,7 @@ pub mod v2 { type MpscSender = Arc>; // type EventHandlers<'a> = Vec>>; - type EventHandlers = HashMap; + type EventHandlers = HashMap, (Triggers, MpscSender)>; // type wrapper for service wait queue type ConnectionQueue = BTreeMap>>; @@ -92,7 +92,7 @@ pub mod v2 { }); } // event add - self.event_registrator.entry(proc_name.to_string()).or_insert((trigger, sender)); + self.event_registrator.entry(proc_name).or_insert((trigger, sender)); } async fn check_state(&self) -> anyhow::Result<()> { let mut addrs = self.access_url.to_socket_addrs()?; @@ -148,7 +148,7 @@ pub mod v2 { for name in iterator { let proc_name = name.to_string(); info!("Trying to notify process `{}` ...", &proc_name); - let sender_opt = self.event_registrator.get(&proc_name) + let sender_opt = self.event_registrator.get(&name) .map(|(trigger, sender)| (trigger.to_service_negative_event(name.clone()), sender) ); -- 2.40.1 From 403285a9377031b48ef1ed1706464c2659016cff Mon Sep 17 00:00:00 2001 From: prplV Date: Sun, 4 May 2025 09:53:59 -0400 Subject: [PATCH 42/44] OWNERSHIP FIX 3 --- noxis-rs/src/main.rs | 4 +- noxis-rs/src/utils.rs | 85 +++++++++++++++++++++++++++++-------------- 2 files changed, 58 insertions(+), 31 deletions(-) diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index 79e5b8c..b10a5da 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -58,9 +58,7 @@ async fn main() -> anyhow::Result<()>{ handler.push(ctrlc); let monitoring = tokio::spawn(async move { - let config = if !rx_brd.is_empty() { - rx_brd.recv().await? 
- } else { + let config = { let mut tick = tokio::time::interval(Duration::from_millis(500)); loop { tick.tick().await; diff --git a/noxis-rs/src/utils.rs b/noxis-rs/src/utils.rs index 3018f69..4405669 100644 --- a/noxis-rs/src/utils.rs +++ b/noxis-rs/src/utils.rs @@ -36,38 +36,39 @@ pub mod v2 { use crate::options::structs::{Events, FileTriggersForController, ProcessUnit, Triggers}; use super::*; - enum ControllerResult<'a> { - Process(Option>), - File(Option>), - Service(Option>), + enum ControllerResult { + Process(Option), + File(Option), + Service(Option), } #[derive(Debug)] - struct Supervisor<'a> { - prcs : LinkedList>, - files : LinkedList>, - services : LinkedList>, + struct Supervisor { + prcs : LinkedList, + files : LinkedList, + services : LinkedList, } - impl<'a> Supervisor<'a> { - pub fn new() -> Supervisor<'a> { + impl Supervisor { + pub fn new() -> Supervisor { Supervisor { prcs: LinkedList::new(), files: LinkedList::new(), services: LinkedList::new()} } - pub async fn with_config(mut self, config: &'a Processes) -> Supervisor<'a> { + pub async fn with_config(mut self, config: &Processes) -> Supervisor { let _ = config.processes.iter() .for_each(|prc| { - let (rx, tx) = mpsc::channel::>(10); + let (rx, tx) = mpsc::channel::(10); let temp = ProcessesController::new(&prc.name, tx).with_exe(&prc.path); if !self.prcs.contains(&temp) { self.prcs.push_back(temp); } let rx = Arc::new(rx); + let proc_name: Arc = Arc::from(prc.name.clone()); let _ = prc.dependencies.files.iter() .for_each(|file| { let mut hm = HashMap::new(); - let triggers = FileTriggersForController { on_change: &file.triggers.on_change, on_delete: &file.triggers.on_delete}; - hm.insert(prc.name.as_str(), (triggers, rx.clone())); + let triggers = FileTriggersForController { on_change: Arc::from(file.triggers.on_change.clone()), on_delete: Arc::from(file.triggers.on_delete.clone())}; + hm.insert(proc_name.clone(), (triggers, rx.clone())); let tempfile = 
FilesController::new(&file.filename.as_str(), hm) .with_path(&file.src); @@ -90,23 +91,24 @@ pub mod v2 { let rx = rx.clone(); let serv_cont = ServicesController::new().with_access_name( &serv.hostname, - access_url + &access_url ); // triggers - let triggers = Triggers::new_service(&serv.triggers.on_lost, serv.triggers.wait); + let arc: Arc = Arc::from(serv.triggers.on_lost.clone()); + let triggers = Triggers::new_service(arc, serv.triggers.wait); if let Some(proc) = self.services.iter_mut().find(|a| &&serv_cont == a) { proc.add_process(&prc.name, triggers, rx); } else { // vecdeque for queue - let mut vec: VecDeque<&'a str> = VecDeque::new(); - vec.push_back(&prc.name); + let mut vec: VecDeque> = VecDeque::new(); + vec.push_back(proc_name.clone()); // connection_queue - let mut connection_queue: BTreeMap> = BTreeMap::new(); + let mut connection_queue: BTreeMap>> = BTreeMap::new(); connection_queue.insert(serv.triggers.wait, vec); // event_reg let mut hm = HashMap::new(); - hm.insert(prc.name.as_str(), (triggers, rx)); + hm.insert(proc_name.clone(), (triggers, rx)); let serv_cont = serv_cont.with_params(connection_queue, hm); self.services.push_back(serv_cont); @@ -118,21 +120,48 @@ pub mod v2 { pub fn get_stats(&self) -> String { format!("processes: {}, files: {}, services: {}", self.prcs.len(),self.files.len(), self.services.len()) } - async fn proccess_prc(&mut self) { - - } } #[async_trait] - impl<'a> ProcessUnit<'a> for Supervisor<'a> { - async fn process(&'a mut self) { + impl ProcessUnit for Supervisor { + async fn process(&mut self) { info!("Initializing monitoring ..."); loop { - // let mut tasks: Vec> = vec![]; + let mut tasks: Vec> = vec![]; // let (mut prc, mut file, mut serv) = (self.prcs.pop_front().unwrap(), self.files.pop_front().unwrap(), self.services.pop_front().unwrap()); // let res = tokio::join!(prc.process(), file.process(), serv.process()); if let Some(mut val) = self.prcs.pop_front() { - tokio::spawn(async move 
{val.process().await;}).await; + tasks.push( + tokio::spawn( async move { + val.process().await; + ControllerResult::Process(Some(val)) + }) + ); + } + if let Some(mut val) = self.files.pop_front() { + tasks.push( + tokio::spawn( async move { + val.process().await; + ControllerResult::File(Some(val)) + }) + ); + } + if let Some(mut val) = self.services.pop_front() { + tasks.push( + tokio::spawn( async move { + val.process().await; + ControllerResult::Service(Some(val)) + }) + ); + } + for task in tasks { + match task.await { + Ok(ControllerResult::Process(Some(val))) => self.prcs.push_back(val), + Ok(ControllerResult::File(Some(val))) => self.files.push_back(val), + Ok(ControllerResult::Service(Some(val))) => self.services.push_back(val), + Err(er) => error!("Controller task crushed : {er}. Cannot push back to the exec queue ..."), + _ => { /* DEAD END (CAN NOT BE EXECUTED) */}, + } } tokio::time::sleep(Duration::from_millis(100)).await; } @@ -149,7 +178,7 @@ pub mod v2 { ) -> anyhow::Result<()> { let mut supervisor = Supervisor::new().with_config(&config).await; info!("Monitoring: {} ", &supervisor.get_stats()); - supervisor.process().await; + supervisor.process().await; Ok(()) } -- 2.40.1 From 3dd238cf974fc809fd60f368cca2c066429c8ce7 Mon Sep 17 00:00:00 2001 From: prplV Date: Sun, 4 May 2025 11:01:07 -0400 Subject: [PATCH 43/44] debug -> opened issue #40 --- noxis-rs/Cargo.toml | 2 +- noxis-rs/src/options/structs.rs | 3 +++ noxis-rs/src/utils.rs | 3 ++- noxis-rs/src/utils/files.rs | 29 +++++++++++++++-------------- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/noxis-rs/Cargo.toml b/noxis-rs/Cargo.toml index 8e35c7d..3fa9d6d 100644 --- a/noxis-rs/Cargo.toml +++ b/noxis-rs/Cargo.toml @@ -8,7 +8,7 @@ anyhow = "1.0.93" chrono = "0.4.38" clap = { version = "4.5.21", features = ["derive"] } env_logger = "0.11.3" -inotify = "0.10.2" +inotify = "0.11.0" log = "0.4.22" pcap = "2.2.0" redis = "0.29.2" diff --git a/noxis-rs/src/options/structs.rs 
b/noxis-rs/src/options/structs.rs index 31c0cb6..56be896 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -5,6 +5,7 @@ use serde::{Deserialize, Serialize}; use async_trait::async_trait; use std::sync::Arc; +#[derive(Debug)] pub enum DependencyType { File, Service, @@ -93,10 +94,12 @@ pub enum ProcessState { Stopped, StoppedByCli, } +#[derive(Debug)] pub enum Events { Positive(Arc), Negative(NegativeOutcomes) } +#[derive(Debug)] pub enum NegativeOutcomes { FileWasChanged(Arc, DependencyType, Arc), FileWasMovedOrDeleted(Arc, DependencyType, Arc), diff --git a/noxis-rs/src/utils.rs b/noxis-rs/src/utils.rs index 4405669..b1e315a 100644 --- a/noxis-rs/src/utils.rs +++ b/noxis-rs/src/utils.rs @@ -21,7 +21,6 @@ use std::sync::Arc; // use tokio::join; use tokio::sync::mpsc; use tokio::time::Duration; -use tokio::sync::broadcast::Receiver; // use tokio::sync::mpsc::{Receiver as MpscReciever, Sender as MpscSender}; // controllers import use prcs::v2::ProcessesController; @@ -36,6 +35,7 @@ pub mod v2 { use crate::options::structs::{Events, FileTriggersForController, ProcessUnit, Triggers}; use super::*; + #[derive(Debug)] enum ControllerResult { Process(Option), File(Option), @@ -127,6 +127,7 @@ pub mod v2 { async fn process(&mut self) { info!("Initializing monitoring ..."); loop { + // dbg!(&self); let mut tasks: Vec> = vec![]; // let (mut prc, mut file, mut serv) = (self.prcs.pop_front().unwrap(), self.files.pop_front().unwrap(), self.services.pop_front().unwrap()); // let res = tokio::join!(prc.process(), file.process(), serv.process()); diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index d286c8f..ce431fb 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -65,20 +65,18 @@ } } async fn trigger_on(&mut self, trigger_type: Option) { - let _ = self.triggers.iter() - .map(|(prc_name, (triggers, channel))| async { - let _ = channel.send({ - match &trigger_type { - None => { - 
Events::Positive(self.code_name.clone()) - }, - Some(event) => { - info!("Event on file {} ({}) : {}. Notifying `{}` ...", self.name, &self.path, event, *prc_name); - event.event_from_file_trigger_controller(self.code_name.clone(), triggers) - }, - } - }).await; - }); + for (prc_name, (triggers, channel)) in &self.triggers { + let msg = match &trigger_type { + None => { + Events::Positive(self.code_name.clone()) + }, + Some(event) => { + info!("Event on file {} ({}) : {}. Notifying `{}` ...", &self.name, &self.path, event, &prc_name); + event.event_from_file_trigger_controller(self.code_name.clone(), &triggers) + }, + }; + let _ = channel.send(msg).await; + } } } #[async_trait] @@ -86,11 +84,14 @@ async fn process(&mut self) { // polling file check // 1) existing check + // dbg!(&self); if let Ok(_) = check_file(&self.name, &self.path).await { match &mut self.watcher { Some(notify) => { let mut buffer = [0; 1024]; if let Ok(mut notif_events) = notify.read_events(&mut buffer) { + // notif_events.into_iter().for_each(|mask| {dbg!(&mask.mask);}); + // todo!(); if let (recreate_watcher, true) = ( notif_events.any(|mask| mask.mask == EventMask::DELETE_SELF), notif_events.any(|mask| mask.mask == EventMask::MODIFY) -- 2.40.1 From aae0391a0c80b5d8c8ed83561f41c567af12c235 Mon Sep 17 00:00:00 2001 From: prplV Date: Mon, 5 May 2025 09:14:25 -0400 Subject: [PATCH 44/44] files fix --- noxis-rs/src/utils/files.rs | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index ce431fb..da87b0f 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -19,11 +19,18 @@ type MpscSender = Arc>; type EventHandlers = HashMap, (Triggers, MpscSender)>; + #[derive(Debug)] + enum FileState { + Ok, + NotFound, + } + #[derive(Debug)] pub struct FilesController { name : Arc, path : String, code_name : Arc, + state : FileState, watcher : Option, triggers : EventHandlers, } @@ 
-38,9 +45,10 @@ pub fn new(name: &str, triggers: EventHandlers) -> FilesController { let name: Arc = Arc::from(name); Self { - name: name.clone(), + name : name.clone(), path : String::new(), - watcher: None, + state : FileState::Ok, + watcher : None, triggers, code_name : name.clone(), } @@ -86,6 +94,12 @@ // 1) existing check // dbg!(&self); if let Ok(_) = check_file(&self.name, &self.path).await { + if let FileState::NotFound = self.state { + info!("File {} ({}) was found in determined scope. Notifying ...", self.name, self.code_name); + self.state = FileState::Ok; + // reseting negative outcome in prc + self.trigger_on(None).await; + } match &mut self.watcher { Some(notify) => { let mut buffer = [0; 1024]; @@ -110,7 +124,7 @@ }, } } - self.trigger_on(Some(FileTriggerType::OnChange)).await; + self.trigger_on(Some(FileTriggerType::OnChange)).await; return; } } @@ -118,8 +132,11 @@ None => { /* DEAD END */}, } } else { - warn!("File {} ({}) was not found in determined scope", self.name, &self.path); - self.trigger_on(Some(FileTriggerType::OnDelete)).await; + if let FileState::Ok = self.state { + warn!("File {} ({}) was not found in determined scope", self.name, &self.path); + self.state = FileState::NotFound; + self.trigger_on(Some(FileTriggerType::OnDelete)).await; + } return; } self.trigger_on(None).await; -- 2.40.1