diff --git a/.github/workflows/rust_riscv_build.yml b/.github/workflows/rust_riscv_build.yml new file mode 100644 index 0000000..052967d --- /dev/null +++ b/.github/workflows/rust_riscv_build.yml @@ -0,0 +1,40 @@ +name: Rust riscv64 build + +on: + push: + branches: [ master , rc ] + pull_request: + branches: [ master , rc ] + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config build-essential gcc-riscv64-unknown-elf gcc-riscv64-linux-gnu binutils-riscv64-linux-gnu + + - name: Set up Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + + - name: Cache dependencies + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Set up riscv64gc toolchain + run: rustup target add riscv64gc-unknown-linux-gnu + + - name: Build riscv64gc-unknown-linux-gnu + run: cargo riscv64 --verbose diff --git a/.github/workflows/rust_test.yml b/.github/workflows/rust_test.yml new file mode 100644 index 0000000..8ec85af --- /dev/null +++ b/.github/workflows/rust_test.yml @@ -0,0 +1,37 @@ +name: Rust test + +on: + push: + branches: [ master , rc ] + pull_request: + branches: [ master , rc ] + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config build-essential + + - name: Set up Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + + - name: Cache dependencies + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Run tests + run: cargo test 
--verbose diff --git a/.github/workflows/rust_x86_build.yml b/.github/workflows/rust_x86_build.yml new file mode 100644 index 0000000..6be838e --- /dev/null +++ b/.github/workflows/rust_x86_build.yml @@ -0,0 +1,37 @@ +name: Rust x86_64 build + +on: + push: + branches: [ master , rc ] + pull_request: + branches: [ master , rc ] + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install dependencies + run: sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config build-essential + + - name: Set up Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + + - name: Cache dependencies + uses: actions/cache@v4 + with: + path: | + ~/.cargo/registry + ~/.cargo/git + target + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + restore-keys: | + ${{ runner.os }}-cargo- + + - name: Build x86_64-unknown-linux-gnu + run: cargo x86_64 --verbose diff --git a/.gitignore b/.gitignore index eae2549..37fc943 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,9 @@ /target .idea -/.env +.env Cargo.lock hagent_test.sock release -*.sock \ No newline at end of file +*.sock +*.bak +docker-compose.yml \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 5b477a0..645c6e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ resolver = "2" members = [ "noxis-rs", - "noxis-cli", + "noxis-cli", "noxis-proxy", ] [profile.dev] diff --git a/README.md b/README.md index 449db98..beefe9d 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,20 @@ -# noxis-rs +# noxis + ![Logo](logo.png) -### In-container integrating util to handle processes runtime -( with amd64 and riscv64 support ) -## Depends on +`noxis` - monitoring util with special attention on +1) **Speed** +2) **Multiplatform** execution *(with `amd64` and `riscv64` **support**)* +3) **Smallness** and **Optimization** + +Its **main tasks** are + - to manage the processes that occur inside the container or in the target 
system. + - collect data (metrics); + - monitor the availability of system files necessary for the operation of processes; + - check whether there is a connection between processes and services, where the information comes from or where it is sent. + +## Build requirements - `rustup (>=1.27.1)` - `gcc-riscv64-unknown-elf` - `build-essential` @@ -12,60 +22,70 @@ - `binutils-riscv64-linux-gnu` -## Setting up -Download and execute rustup.sh -~~~bash +## Key items in repo + +1) Main daemon `noxis-rs` +2) CLI `noxis-cli` +3) Unix-Socket to Web-Socket **Proxy** for integrations `noxis-proxy` + +## Setting up device + +Download and execute rustup.sh *(for building)* +~~~ bash curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh ~~~ ## Building -1. Clone this repo `runner-rs` +1. Clone `noxis` -~~~bash - git clone https://github.com/prplV/runner-rs +~~~ bash + git clone https://github.com/prplV/noxis ~~~ -2. Enter project's dir and set up toolchain list to compile code for RISC-V and AMD64 +2. Enter project's dir and set up toolchain list to compile code for RISC-V or x86_64 -~~~bash - cd runner-rs/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu +~~~ bash + cd noxis/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu ~~~ > [!NOTE] -> Cargo is configured to build an app for amd64/linux defaultly. RISCV-based compilation is optional. +> Cargo is configured to build a util for x86_64/linux by default. RISCV-based compilation is optional. -3.1. Release build of app for amd64/linux +3.1. Release build of util for x86_64/linux ~~~bash cargo x86_64 ~~~ -3.2. Release build of app for riscv64/linux +3.2. Release build of util for riscv64/linux ~~~bash cargo riscv64 ~~~ -3.3. Release build of app for both (riscv64 and amd64) +3.3. 
Release build of util for both (riscv64 and x86_64) ~~~bash cargo unibuild ~~~ -## Execution for amd64/linux +## Execution **DAEMON** for x86_64/linux + +1) If you work on x86_64/linux machine execute: + ~~~bash -./target/x86_64-unknown-linux-gnu/release/runner-rs +./target/x86_64-unknown-linux-gnu/release/noxis-rs ~~~ or ~~~bash cargo run_x86 ~~~ +2) If you work on riscv64/linux machine execute: -## Execution for riscv64/linux ~~~bash -./target/riscv64gc-unknown-linux-gnu/release/runner-rs +./target/riscv64gc-unknown-linux-gnu/release/noxis-rs ~~~ or > [!CAUTION] diff --git a/logo.png b/logo.png index b8424ef..c99ab99 100644 Binary files a/logo.png and b/logo.png differ diff --git a/noxis-cli/.env.example b/noxis-cli/.env.example new file mode 100644 index 0000000..b0d53a3 --- /dev/null +++ b/noxis-cli/.env.example @@ -0,0 +1 @@ +NOXIS_SOCKET_PATH = "/home/vladislavd/diplom_code/noxis-rs/noxis.sock" \ No newline at end of file diff --git a/noxis-cli/Cargo.toml b/noxis-cli/Cargo.toml index e02d5f8..712b2b6 100644 --- a/noxis-cli/Cargo.toml +++ b/noxis-cli/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" [dependencies] anyhow = "1.0.94" clap = { version = "4.5.22", features = ["derive"] } +dotenv = "0.15.0" serde = { version = "1.0.215", features = ["derive"] } serde_json = "1.0.133" thiserror = "2.0.11" diff --git a/noxis-cli/src/cli.rs b/noxis-cli/src/cli.rs index 5a82b64..07a0c37 100644 --- a/noxis-cli/src/cli.rs +++ b/noxis-cli/src/cli.rs @@ -1,52 +1,43 @@ use clap::{Parser, Subcommand}; +use metrics_models::MetricsMode; #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] pub struct Cli { #[arg( short, - default_value="noxis-rs.sock", - help="explicit specify of NOXIS Socket file" + default_value = "noxis-rs.sock", + help = "explicit specify of NOXIS Socket file" )] - pub socket : String, - #[command( - subcommand, - help = "to manage Noxis work", - )] - pub command : Commands, + pub socket: String, + #[command(subcommand, help = "to manage Noxis work")] + 
pub command: Commands, } #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] pub enum Commands { - #[command( - about = "To get info about current Noxis status", - )] - Status, - #[command( - about = "To start Noxis process", - )] + #[command(about = "To get info about current Noxis status")] + Status, + #[command(about = "To start Noxis process")] Start(StartAction), - #[command( - about = "To stop Noxis process", - )] + #[command(about = "To stop Noxis process")] Stop, - #[command( - about = "To restart Noxis process", - )] + #[command(about = "To restart Noxis process")] Restart(StartAction), - #[command( - about = "To get list of processes that are being monitoring", - )] + #[command(about = "To get list of processes that are being monitoring")] Processes, // process command - #[command( - about = "To manage current process that is being monitoring", - )] + #[command(about = "To manage current process that is being monitoring")] Process(ProcessCommand), - // config command = - #[command( - about = "To manage config settings", - )] + #[command(about = "To manage config settings")] Config(ConfigCommand), + #[command(about = "To inspect system metrics in restricted mode")] + Inspect(MetricsCommand), +} + +#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] +pub struct MetricsCommand { + #[command(subcommand)] + pub mode: MetricsMode, } #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] @@ -56,96 +47,100 @@ pub struct StartAction { num_args = 1.., value_delimiter = ' ' )] - pub flags : Vec, + pub flags: Vec, } #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] pub struct ConfigCommand { #[command(subcommand)] - pub action : ConfigAction, + pub action: ConfigAction, } #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] pub enum ConfigAction { - #[command( - about = "To change current Noxis configuration", - )] + #[command(about = "To change current Noxis configuration")] Local(LocalConfig), - 
#[command( - about = "To change credentials of the remote config server", - )] + #[command(about = "To change credentials of the remote config server")] Remote, - #[command( - about = "To reset all config settings", - )] + #[command(about = "To reset all config settings")] Reset, + #[command(about = "To get current Noxis configuration", name = "ls")] + Show(EnvConfig), +} +#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] +pub struct EnvConfig { + // flag + #[arg(long = "env", action, help = "to read environment vars configuration")] + pub is_env: bool, } #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] pub struct LocalConfig { // flag - #[arg( - long = "json", - action, - help = "to read following input as JSON", - )] - pub is_json : bool, + #[arg(long = "json", action, help = "to read following input as JSON")] + pub is_json: bool, // value - #[arg( - help = "path to config file or config String (with --json flag)", - )] - pub config : String, + #[arg(help = "path to config file or config String (with --json flag)")] + pub config: String, } #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] pub struct ProcessCommand { - #[arg( - help = "name of needed process", - )] - pub process : String, - #[command( - subcommand, - help = "To get current process's status", - )] - pub action : ProcessAction, + #[arg(help = "name of needed process")] + pub process: String, + #[command(subcommand, help = "To get current process's status")] + pub action: ProcessAction, } #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] pub enum ProcessAction { - #[command( - about = "To get info about current process status", - )] - Status, - #[command( - about = "To start current process", - )] - Start, - #[command( - about = "To stop current process", - )] - Stop, - #[command( - about = "To freeze (hybernaze) current process", - )] - Freeze, - #[command( - about = "To unfreeze (unhybernaze) current process", - )] - Unfreeze, - #[command( - 
about = "To restart current process", - )] - Restart, - #[command( - about = "To get info about current process's dependencies", - )] - Deps, - #[command( - about = "To get info about current process's files-dependencies", - )] - Files, - #[command( - about = "To get info about current process's services-dependencies", - )] - Services, -} \ No newline at end of file + #[command(about = "To get info about current process status")] + Status, + #[command(about = "To start current process")] + Start, + #[command(about = "To stop current process")] + Stop, + #[command(about = "To freeze (hybernaze) current process")] + Freeze, + #[command(about = "To unfreeze (unhybernaze) current process")] + Unfreeze, + #[command(about = "To restart current process")] + Restart, + #[command(about = "To get info about current process's dependencies")] + Deps, + #[command(about = "To get info about current process's files-dependencies")] + Files, + #[command(about = "To get info about current process's services-dependencies")] + Services, +} + +pub mod metrics_models { + #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)] + pub enum MetricsMode { + #[command(about = "To capture all metrics about undercontrolled system")] + Full, + // system + #[command(about = "To capture general host info")] + Host, + #[command(about = "To capture detailed CPU metrics")] + Cpu, + #[command(about = "To capture RAM metrics")] + Ram, + #[command(about = "To capture disk environment metrics")] + Rom, + #[command(about = "To capture system net interfaces metrics")] + Network, + // processes + #[command(about = "To capture monitoring processes metrics")] + Processes, // Config + } +} + +impl Cli { + pub fn validate_socket(mut self) -> Self { + if let Ok(path) = std::env::var("NOXIS_SOCKET_PATH") { + self.socket = path; + } + self + } +} diff --git a/noxis-cli/src/cli_error.rs b/noxis-cli/src/cli_error.rs index d5bae9b..ffba19a 100644 --- a/noxis-cli/src/cli_error.rs +++ 
b/noxis-cli/src/cli_error.rs @@ -1,8 +1,9 @@ use thiserror::Error; #[derive(Debug, Error)] +#[allow(dead_code)] pub enum NoxisCliError { - #[error("Can't find socket `{0}`. Error : {1}")] + #[error("Can't find socket `{0}`. {1}")] NoxisDaemonMissing(String, String), #[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's runtime!")] PortIsNotWritable, @@ -11,5 +12,5 @@ pub enum NoxisCliError { #[error("Can't parse CLI struct and send as byte stream")] ToStringCliParsingParsing, #[error("Can't read Noxis response due to {0}")] - CliResponseReadError(String) -} \ No newline at end of file + CliResponseReadError(String), +} diff --git a/noxis-cli/src/cli_net.rs b/noxis-cli/src/cli_net.rs index a3300ed..1e31666 100644 --- a/noxis-cli/src/cli_net.rs +++ b/noxis-cli/src/cli_net.rs @@ -1,30 +1,37 @@ -use tokio::net::UnixStream; -use tokio::io::{AsyncWriteExt, AsyncReadExt}; -use tokio::time::{Duration, sleep}; -use anyhow::Result; -use super::Cli; use super::cli_error::NoxisCliError; +use super::Cli; +use anyhow::Result; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::net::UnixStream; +#[allow(dead_code)] async fn create_us_stream(cli: &Cli) -> Result { - Ok(UnixStream::connect(&cli.socket).await.map_err(|er| NoxisCliError::NoxisDaemonMissing((&cli.socket).to_string(), er.to_string()))?) + Ok(UnixStream::connect(&cli.socket).await.map_err(|er| { + NoxisCliError::NoxisDaemonMissing((&cli.socket).to_string(), er.to_string()) + })?) 
} +#[allow(dead_code)] pub async fn try_send(cli: Cli) -> Result<()> { - // let stream = create_us_stream(&cli).await; let mut stream = create_us_stream(&cli).await?; - let msg = serde_json::to_vec(&cli) - .map_err(|_| NoxisCliError::ToStringCliParsingParsing)?; + let msg = serde_json::to_vec(&cli).map_err(|_| NoxisCliError::ToStringCliParsingParsing)?; - stream.write_all(&msg) + stream + .write_all(&msg) .await .map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?; - let mut response = [0; 1024]; - stream.read(&mut response) + let mut response = Vec::new(); + stream + .read_to_end(&mut response) .await .map_err(|er| NoxisCliError::CliResponseReadError(er.to_string()))?; - println!("Received response: {}", String::from_utf8_lossy(&response)); + let response = String::from_utf8_lossy(&response); + for line in response.lines() { + println!("{}", line); + } + Ok(()) -} \ No newline at end of file +} diff --git a/noxis-cli/src/lib.rs b/noxis-cli/src/lib.rs index 5515aa9..7594858 100644 --- a/noxis-cli/src/lib.rs +++ b/noxis-cli/src/lib.rs @@ -1,5 +1,5 @@ mod cli; -mod cli_net; mod cli_error; +mod cli_net; -pub use cli::*; \ No newline at end of file +pub use cli::*; diff --git a/noxis-cli/src/main.rs b/noxis-cli/src/main.rs index 7961b75..1168e02 100644 --- a/noxis-cli/src/main.rs +++ b/noxis-cli/src/main.rs @@ -1,15 +1,16 @@ mod cli; -mod cli_net; mod cli_error; +mod cli_net; +use anyhow::Result; use clap::Parser; use cli::Cli; use cli_net::try_send; -use anyhow::Result; #[tokio::main] -async fn main() -> Result<()>{ - let cli = Cli::parse(); +async fn main() -> Result<()> { + dotenv::dotenv().ok(); + let cli = Cli::parse().validate_socket(); try_send(cli).await?; Ok(()) } diff --git a/noxis-proxy/.dockerignore b/noxis-proxy/.dockerignore new file mode 100644 index 0000000..3b3da9a --- /dev/null +++ b/noxis-proxy/.dockerignore @@ -0,0 +1,4 @@ +.env +.env.example +README.md +target diff --git a/noxis-proxy/.env.example b/noxis-proxy/.env.example new file mode 100644 
index 0000000..e67257a --- /dev/null +++ b/noxis-proxy/.env.example @@ -0,0 +1,2 @@ +NOXIS_SOCKET_PATH = "/path/to/noxis.sock" +NOXIS_PROXY_PORT = "numport" \ No newline at end of file diff --git a/noxis-proxy/Cargo.toml b/noxis-proxy/Cargo.toml new file mode 100644 index 0000000..75dd591 --- /dev/null +++ b/noxis-proxy/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "noxis-proxy" +version = "0.1.0" +edition = "2024" + +[dependencies] +anyhow = "1.0.98" +axum = { version = "0.8.4", features = ["ws"] } +dotenv = "0.15.0" +tokio = { version = "1.45.1", features = ["full"] } +tracing = "0.1.41" +tracing-subscriber = "0.3.19" diff --git a/noxis-proxy/Dockerfile b/noxis-proxy/Dockerfile new file mode 100644 index 0000000..6dfcfa5 --- /dev/null +++ b/noxis-proxy/Dockerfile @@ -0,0 +1,19 @@ +FROM rust:latest AS builder +WORKDIR /app + +RUN apt update && apt install -y musl-tools +RUN rustup target add x86_64-unknown-linux-musl + +COPY . . + +RUN cargo build --release --target=x86_64-unknown-linux-musl + +FROM alpine:latest +WORKDIR /app + +COPY --from=builder /app/target/x86_64-unknown-linux-musl/release/noxis-proxy /app/noxis-proxy +RUN apk add --no-cache ca-certificates + +EXPOSE 7654 + +ENTRYPOINT ["/app/noxis-proxy"] diff --git a/noxis-proxy/docker-compose.yml b/noxis-proxy/docker-compose.yml new file mode 100644 index 0000000..a4056f3 --- /dev/null +++ b/noxis-proxy/docker-compose.yml @@ -0,0 +1,20 @@ +services: + noxis-proxy: + container_name: noxis-proxy + image: noxis-proxy:0.1.0 + networks: + - noxis-net + environment: + - NOXIS_SOCKET_PATH=./noxis.sock + - NOXIS_PROXY_PORT=7654 + - NOXIS_LOG_LEVEL=TRACE + volumes: + - /home/user/diplom_code/noxis-rs/noxis.sock:/app/noxis.sock + ports: + - 7654:7654 + restart: always + +networks: + noxis-net: + driver: bridge + diff --git a/noxis-proxy/src/main.rs b/noxis-proxy/src/main.rs new file mode 100644 index 0000000..ac3568a --- /dev/null +++ b/noxis-proxy/src/main.rs @@ -0,0 +1,96 @@ +use axum::{ + extract::{ + 
ws::{Message, WebSocket, WebSocketUpgrade}, + State, + }, + response::IntoResponse, + routing::get, + Router, +}; +use std::{ + path::PathBuf, str::FromStr, +}; +use tokio::net::UnixStream; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; + +#[derive(Clone)] +struct AppState { + socket_path: PathBuf, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + dotenv::dotenv().ok(); + + tracing_subscriber::fmt() + .with_max_level(tracing::Level::from_str(&std::env::var("NOXIS_LOG_LEVEL").unwrap_or_else(|_| String::from("INFO"))).unwrap_or_else(|_| tracing::Level::INFO)) + .with_writer(std::io::stdout) + .compact() + .init(); + + let app_state = AppState { + socket_path : PathBuf::new().join(std::env::var("NOXIS_SOCKET_PATH").unwrap_or_else(|_| String::from("./noxis.sock"))) + }; + let app = Router::new() + .route("/ws", get(ws_handler)) + .route("/hello", get(hello)) + .with_state(app_state); + + let bind = format!("0.0.0.0:{}", std::env::var("NOXIS_PROXY_PORT").unwrap_or_else(|_| String::from("7654"))); + + tracing::info!("Serving on {}", &bind); + + let listener = tokio::net::TcpListener::bind(bind) + .await?; + axum::serve(listener, app).await?; + Ok(()) +} + +async fn ws_handler( + ws: WebSocketUpgrade, + State(state): State, +) -> impl IntoResponse { + tracing::info!("New WebSocket connection"); + ws.on_upgrade(|socket| handle_socket(socket, state)) +} + +async fn hello( + State(_state): State, +) -> impl IntoResponse { + String::from("HELLO") +} + +async fn handle_socket(mut ws: WebSocket, state: AppState) { + tracing::info!("handle websocket"); + + let ws_receiver = tokio::spawn(async move { + while let Some(Ok(msg)) = ws.recv().await { + let mut unix_socket = match UnixStream::connect(&state.socket_path).await { + Ok(socket) => socket, + Err(e) => { + tracing::error!("Failed to connect to Unix socket: {}", e); + let _ = ws.send(Message::Text("ERROR: Unix socket connection failed".into())).await; + return; + } + }; + if let Message::Text(text) = msg { + if 
let Err(e) = unix_socket.write_all(text.as_bytes()).await { + tracing::error!("Failed to write to Unix socket: {}", e); + break; + } + let mut buf = Vec::new(); + match unix_socket.read_to_end(&mut buf).await { + Ok(n) if n > 0 => { + let response = String::from_utf8_lossy(&buf[..n]); + if ws.send(Message::Text(response.into_owned().into())).await.is_err() { + break; + } + } + Ok(_) | Err(_) => break, + } + } + } + }); + + let _ = ws_receiver.await; +} diff --git a/noxis-rs/.env.example b/noxis-rs/.env.example new file mode 100644 index 0000000..1909961 --- /dev/null +++ b/noxis-rs/.env.example @@ -0,0 +1,16 @@ +# ACTIONS (trigger on exist) +NOXIS_NO_HAGENT = "true" +# NOXIS_NO_LOGS = "true" +NOXIS_REFRESH_LOGS = "true" +NOXIS_NO_SUB = "true" + +# VALUES +NOXIS_HAGENT_SOCKET_PATH = "/var/run/example/hostagent.sock" +NOXIS_LOG_TO = "/var/log/noxis/noxis.log" +NOXIS_REMOTE_SERVER_URL = "ip.ip.ip.ip:port" +NOXIS_CONFIG_PATH = "./settings.json" +NOXIS_METRICS_MODE = "full" +NOXIS_SOCKET_PATH = "/path/to/noxis.sock" +NOXIS_BACKUP_FOLDER = "/path/to/backups/folder" + +NOXIS_MAX_LOG_LEVEL = "TRACE" \ No newline at end of file diff --git a/noxis-rs/Cargo.toml b/noxis-rs/Cargo.toml index 3fa9d6d..086e0f1 100644 --- a/noxis-rs/Cargo.toml +++ b/noxis-rs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noxis-rs" -version = "0.11.26" +version = "0.12.0" edition = "2021" [dependencies] @@ -20,3 +20,6 @@ noxis-cli = { path = "../noxis-cli" } dotenv = "0.15.0" futures = "0.3.31" async-trait = "0.1.88" +crossbeam = { version = "0.8.4", features = ["crossbeam-channel"] } +lazy_static = "1.5.0" +ulid = "1.2.1" diff --git a/noxis-rs/settings.json b/noxis-rs/settings.json index 496fe2a..7c74d01 100644 --- a/noxis-rs/settings.json +++ b/noxis-rs/settings.json @@ -1,6 +1,5 @@ { "dateOfCreation": "1721381809112", - "configServer": "192.168.2.37", "processes": [ { "name": "temp-process", @@ -12,7 +11,35 @@ "src": "./tests/examples/", "triggers": { "onDelete": "stop", - "onChange": "restart" + 
"onChange": "restart", + "doRestore" : true + } + }, + { + "filename": "none.json", + "src": "./tests/examples/", + "triggers": { + "onDelete": "stop", + "onChange": "restart", + "doRestore" : false + } + }, + { + "filename": "invalid_config.json", + "src": "./tests/examples/", + "triggers": { + "onDelete": "stop", + "onChange": "restart", + "doRestore" : false + } + }, + { + "filename": "save-conf.json", + "src": "./tests/examples/", + "triggers": { + "onDelete": "stop", + "onChange": "restart", + "doRestore" : true } } ], @@ -21,12 +48,20 @@ "hostname": "ya.ru", "port": 443, "triggers": { - "wait": 10, - "onLost": "restart" + "wait": 2, + "onLost": "stop" + } + }, + { + "hostname": "8.8.8.8", + "port": 443, + "triggers": { + "wait": 2, + "onLost": "stop" } } ] } } ] -} +} \ No newline at end of file diff --git a/noxis-rs/src/main.rs b/noxis-rs/src/main.rs index b10a5da..c39cbef 100644 --- a/noxis-rs/src/main.rs +++ b/noxis-rs/src/main.rs @@ -1,53 +1,103 @@ mod options; mod utils; -use clap::Parser; use log::{error, info}; -use options::config::*; -use options::logger::setup_logger; -use options::signals::set_valid_destructor; -use options::structs::Processes; use options::cli_pipeline::init_cli_pipeline; +use options::config::v2::init_config_mechanism; +use options::logger::setup_logger; +use options::preboot::PrebootParams; +use options::signals::set_valid_destructor; +use options::structs::ProcessUnit; +use options::structs::{bus::BusMessage, Processes}; use std::sync::Arc; use std::time::Duration; -use tokio::sync::mpsc; -use utils::*; -use options::preboot::PrebootParams; -use tokio::sync::{broadcast, oneshot}; -use options::config::v2::init_config_mechanism; +use tokio::sync::{broadcast, mpsc, oneshot}; +use utils::bus::Bus; +use utils::metrics::init_metrics_grubber; use utils::v2::init_monitoring; #[tokio::main(flavor = "multi_thread", worker_threads = 4)] -async fn main() -> anyhow::Result<()>{ - let preboot = Arc::new(PrebootParams::parse().validate()?); 
- +async fn main() -> anyhow::Result<()> { + let preboot = Arc::new(PrebootParams::validate()); let _ = setup_logger(); info!("Noxis is configurating..."); - // + // let (tx_brd, mut rx_brd) = broadcast::channel::(1); + // for cli to get config + let mut rx_cli_brd = tx_brd.subscribe(); // cli <-> config let (tx_oneshot, rx_oneshot) = oneshot::channel::(); let mut handler: Vec> = vec![]; - // initilaizing task for config manipulations + // to BUS channel + let (tx_to_bus, rx_to_bus) = mpsc::channel::(5); + // from BUS channels + let (tx_to_cli, rx_to_cli) = mpsc::channel::(5); + let (tx_to_supervisor, rx_to_supervisor) = mpsc::channel::(5); + let (tx_to_metrics, rx_to_metrics) = mpsc::channel::(5); + + let tx_to_bus = Arc::new(tx_to_bus); + let tx_to_cli = Arc::new(tx_to_cli); + let tx_to_supervisor = Arc::new(tx_to_supervisor); + let tx_to_metrics = Arc::new(tx_to_metrics); + + let bus_module = tokio::spawn(async move { + let mut bus = Bus::new( + rx_to_bus, + tx_to_cli.clone(), + tx_to_supervisor.clone(), + tx_to_metrics.clone(), + ); + bus.process().await; + error!("Info Bus crushed !"); + }); + handler.push(bus_module); + + // initilaizing task for config manipulations + let preboot_config = preboot.clone(); let config_module = tokio::spawn(async move { - let _ = init_config_mechanism( - rx_oneshot, - tx_brd, - preboot.clone() - ).await; + let _ = init_config_mechanism(rx_oneshot, tx_brd, preboot_config).await; }); handler.push(config_module); - - // initilaizing task for cli manipulation + + // initilaizing task for cli manipulation + let tx_bus = tx_to_bus.clone(); + let preboot_cli = preboot.clone(); let cli_module = tokio::spawn(async move { - if let Err(er) = init_cli_pipeline().await { + let config = { + let mut tick = tokio::time::interval(Duration::from_millis(500)); + loop { + tick.tick().await; + break match rx_cli_brd.try_recv() { + Ok(conf) => conf, + Err(_) => continue, + }; + } + }; + if let Err(er) = init_cli_pipeline( + preboot_cli, + 
Arc::new(config), + tx_oneshot, + rx_to_cli, + tx_bus.clone(), + ) + .await + { error!("CLI pipeline failed due to {}", er) } - }); + }); handler.push(cli_module); + // metrics + let tx_bus = tx_to_bus.clone(); + let metrics_module = tokio::spawn(async move { + if let Err(er) = init_metrics_grubber(tx_bus.clone(), rx_to_metrics).await { + error!("Metrics module crushed : {}", er); + } + }); + handler.push(metrics_module); + // initilaizing task for deinitializing `Noxis` let ctrlc = tokio::spawn(async move { if let Err(er) = set_valid_destructor(vec![].into()).await { @@ -57,6 +107,8 @@ async fn main() -> anyhow::Result<()>{ }); handler.push(ctrlc); + let tx_bus = tx_to_bus.clone(); + let preboot_cli = preboot.clone(); let monitoring = tokio::spawn(async move { let config = { let mut tick = tokio::time::interval(Duration::from_millis(500)); @@ -65,10 +117,10 @@ async fn main() -> anyhow::Result<()>{ break match rx_brd.try_recv() { Ok(conf) => conf, Err(_) => continue, - } + }; } }; - if let Err(er) = init_monitoring(config).await { + if let Err(er) = init_monitoring(config, preboot_cli, rx_to_supervisor, tx_bus).await { error!("Monitoring mod failed due to {}", er); } }); @@ -77,80 +129,5 @@ async fn main() -> anyhow::Result<()>{ for i in handler { let _ = i.await; } - - // setting up redis connection \ - // then conf checks to choose the most actual \ - // let processes: Processes = get_actual_config(preboot.clone()).await.unwrap_or_else(|| { - // error!("No actual configuration for runner. Stopping..."); - // std::process::exit(1); - // }); - // - // info!( - // "Current runner configuration: {}", - // &processes.date_of_creation - // ); - // info!("Runner is ready. 
Initializing..."); - // - // if processes.processes.is_empty() { - // error!("Processes list is null, runner-rs initialization is stopped"); - // return Err(Error::msg("Empty processes segment in config")); - // } - // let mut handler: Vec> = vec![]; - // // is in need to send to the signals handler thread - // let mut senders: Vec>> = vec![]; - // - // for proc in processes.processes.iter() { - // info!( - // "Process '{}' on stage: {}. Depends on {} file(s), {} service(s)", - // proc.name, - // proc.path, - // proc.dependencies.files.len(), - // proc.dependencies.services.len() - // ); - // - // // creating msg channel - // // can or should be executed in new thread - // let (tx, mut rx) = mpsc::channel::(1); - // let proc = Arc::new(proc.clone()); - // let tx = Arc::new(tx.clone()); - // - // senders.push(Arc::clone(&tx.clone())); - // - // let event = tokio::spawn(async move { - // run_daemons(proc.clone(), tx.clone(), &mut rx).await; - // }); - // handler.push(event); - // } - // - // // destructor addition - // handler.push(tokio::spawn(async move { - // if set_valid_destructor(Arc::new(senders)).await.is_err() { - // error!("Linux signals handler creation failed. Terminating main thread..."); - // return; - // } - // - // tokio::time::sleep(Duration::from_millis(200)).await; - // info!("End of job. 
Terminating main thread..."); - // std::process::exit(0); - // })); - // - // // remote config update subscription - // handler.push(tokio::spawn(async move { - // let _ = subscribe_config_stream(Arc::new(processes), preboot.clone()).await; - // })); - // - // // cli pipeline - // handler.push(tokio::spawn(async move { - // let _ = init_cli_pipeline().await; - // })); - // - // for i in handler { - // let _ = i.await; - // } Ok(()) } - -// todo: integration tests -// todo: config pulling mechanism rework (socket) -// todo: tasks management after killing all processes -// todo: diff --git a/noxis-rs/src/options.rs b/noxis-rs/src/options.rs index e53e2d0..3639682 100644 --- a/noxis-rs/src/options.rs +++ b/noxis-rs/src/options.rs @@ -1,8 +1,8 @@ // ! gathering optional items module +pub mod cli_pipeline; pub mod config; pub mod logger; +pub mod preboot; pub mod signals; pub mod structs; -pub mod preboot; -pub mod cli_pipeline; \ No newline at end of file diff --git a/noxis-rs/src/options/cli_pipeline.rs b/noxis-rs/src/options/cli_pipeline.rs index 0c13e22..38010cd 100644 --- a/noxis-rs/src/options/cli_pipeline.rs +++ b/noxis-rs/src/options/cli_pipeline.rs @@ -1,88 +1,350 @@ +use super::structs::bus::BusMessage; +use super::structs::Processes; +use crate::options::structs::bus::InternalCli; use log::{error, info}; -use tokio::net::{ UnixStream, UnixListener }; -use tokio::time::{sleep, Duration}; +use noxis_cli::{Cli, ProcessAction}; +use std::any::Any; use std::fs; -use tokio::io::{ AsyncWriteExt, AsyncReadExt}; -use noxis_cli::Cli; +use std::sync::Arc; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::net::{UnixListener, UnixStream}; +use tokio::sync::{Mutex, OnceCell}; +use tokio::time::{sleep, Duration}; + +use super::preboot::PrebootParams; + +type ConfigGateway = tokio::sync::oneshot::Sender; +type ProcessedConfigGateway = Arc>>; +type BusReciever = tokio::sync::mpsc::Receiver; +type BusSender = Arc>; +type ReadyBusReciever = Arc>>; /// # Fn 
`init_cli_pipeline` /// ## for catching all input requests from CLI -/// +/// /// *input* : - /// -/// *output* : `anyhow::Result<()>` to wrap errors +/// *output* : `anyhow::Result<()>` to wrap errors /// /// *initiator* : fn `main` /// -/// *managing* : `TcpListener` object to handle requests +/// *managing* : `TcpListener` object to handle requests /// /// *depends on* : - -/// -pub async fn init_cli_pipeline() -> anyhow::Result<()> { - let socket_path = "noxis.sock"; +/// +pub async fn init_cli_pipeline( + params: Arc, + config: Arc, + config_gateway: ConfigGateway, + bus_reciever: BusReciever, + bus_sender: BusSender, +) -> anyhow::Result<()> { + let socket_path = ¶ms.self_socket; let _ = fs::remove_file(socket_path); + let config_gateway = Arc::new(Mutex::new(OnceCell::new_with(Some(config_gateway)))); + let bus_reciever = Arc::new(Mutex::new(bus_reciever)); + match UnixListener::bind(socket_path) { Ok(list) => { // TODO: remove `unwrap`s - info!("Listening on {}", socket_path); + info!("Listening on {}", socket_path.display()); + std::env::set_var("NOXIS_SOCKET_PATH", socket_path); loop { match list.accept().await { Ok((socket, _)) => { - // tokio::spawn(); - process_connection(socket).await; - }, + // ??? 
maybe errors on async work with data transfering between modules + let params = params.clone(); + let config = config.clone(); + let config_gateway = config_gateway.clone(); + let bus_reciever = bus_reciever.clone(); + let bus_sender = bus_sender.clone(); + tokio::spawn(async move { + process_connection( + socket, + params.clone(), + config.clone(), + config_gateway.clone(), + bus_reciever, + bus_sender, + ) + .await; + }); + } Err(er) => { error!("Cannot poll connection to CLI due to {}", er); sleep(Duration::from_millis(300)).await; - }, + } } } // Ok(()) - }, + } Err(er) => { error!("Failed to open UnixListener for CLI"); Err(er.into()) - }, + } } } /// # Fn `process_connection` /// ## for processing input CLI requests -/// +/// /// *input* : mut stream: `TcpStream` /// -/// *output* : - +/// *output* : - /// /// *initiator* : fn `init_cli_pipeline` /// /// *managing* : mutable object of `TcpStream` /// /// *depends on* : `tokio::net::TcpStream` -/// -async fn process_connection(mut stream: UnixStream) { +/// +async fn process_connection( + mut stream: UnixStream, + params: Arc, + config: Arc, + cfg_gateway: ProcessedConfigGateway, + bus_reciever: ReadyBusReciever, + bus_sender: BusSender, +) { let mut buf = vec![0; 1024]; match stream.read(&mut buf).await { Ok(0) => { info!("Client disconnected "); - }, + } Ok(n) => { buf.truncate(n); info!("CLI have sent {} bytes", n); match serde_json::from_slice::(&buf) { Ok(cli) => { info!("Received CLI request: {:?}", cli); - let response = "OK"; - if let Err(e) = stream.write_all(response.as_bytes()).await { - error!("Failed to send response: {}", e); + let response = match process_cli_cmd( + cli, + params.clone(), + config, + cfg_gateway.clone(), + bus_reciever.clone(), + bus_sender.clone(), + ) + .await + { + Ok(response) => response, + Err(er) => { + let error_msg = format!("Error: {}", er); + error!("{}", &error_msg); + error_msg + } + }; + for line in response.lines() { + if let Err(er) = 
stream.write_all(line.as_bytes()).await { + error!("Failed to send response: {}", er); + } } } Err(e) => { error!("Failed to parse CLI request: {}", e); } } - }, + } Err(e) => error!("Failed to read from socket: {}", e), } let _ = stream.shutdown().await; } + +async fn process_cli_cmd( + cli: Cli, + params: Arc, + global_config: Arc, + cfg_gateway: ProcessedConfigGateway, + bus_reciever: ReadyBusReciever, + bus_sender: BusSender, +) -> anyhow::Result { + use noxis_cli::{Commands, ConfigAction}; + return match cli.command { + Commands::Config(config) => { + match config.action { + ConfigAction::Show(env) => { + if env.is_env { + Ok(serde_json::to_string_pretty(params.as_ref())?) + } else { + /* */ + Ok(serde_json::to_string_pretty(global_config.as_ref())?) + } + } + ConfigAction::Reset => Err(anyhow::Error::msg( + "It's temporarly forbidden to reset current config using CLI-util", + )), + ConfigAction::Local(cfg) => { + if cfg.is_json { + /* */ + let new_config = serde_json::from_str::(&cfg.config)?; + let new_version = new_config.get_version().to_string(); + + use super::{config::config_comparing, structs::ConfigActuality}; + + return match config_comparing(&global_config, &new_config) { + ConfigActuality::Remote => { + let cfg_gateway = cfg_gateway.clone(); + tokio::spawn(async move { + let mut lock = cfg_gateway.lock().await; + match lock.take() { + Some(channel) => { + let _ = channel.send(new_config); + } + None => error!( + "Cannot update confif due to channel sender loss" + ), + } + }); + Ok(format!( + "Ok. 
Saving and reloading with version {}", + new_version + )) + } + _ => Err(anyhow::Error::msg(format!( + "Local config (version: {}) is more actual", + global_config.get_version() + ))), + }; + } else { + Err(anyhow::Error::msg( + "It's temporarly forbidden to set config in non-json mode", + )) + } + } + ConfigAction::Remote => Ok(params.remote_server_url.clone()), + /* */ + // _ => Err(anyhow::Error::msg("Unrecognized command from CLI")) + } + } + Commands::Processes => { + use crate::options::structs::bus::{BusMessageContentType, BusMessageDirection}; + use crate::utils::metrics::processes::ProcessesQuery; + + let _ = bus_sender + .send(BusMessage::Request( + BusMessageDirection::ToSupervisor, + BusMessageContentType::ProcessQuery, + Box::new(ProcessesQuery::QueryAll), + )) + .await; + + let mut bus = bus_reciever.lock().await; + let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move { + loop { + if let Ok(cont) = bus.try_recv() { + return cont; + } + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + } + }) + .await?; + + if let BusMessage::Response(_, _, content) = resp { + let content: Box = content; + if let Ok(resp) = content.downcast::>() { + return Ok((*resp)?); + } + } + Err(anyhow::Error::msg(format!( + "Unknown type of response from the Supervisor" + ))) + } + Commands::Process(prc) => { + use crate::options::structs::bus::{ + BusMessageContentType, BusMessageDirection, CLiCommand, + }; + + let proc_name = prc.process; + let req = BusMessage::Request( + BusMessageDirection::ToSupervisor, + BusMessageContentType::Cli, + Box::new(match prc.action { + ProcessAction::Start => InternalCli { + prc: proc_name, + cmd: CLiCommand::Start, + }, + ProcessAction::Stop => InternalCli { + prc: proc_name, + cmd: CLiCommand::Stop, + }, + ProcessAction::Restart => InternalCli { + prc: proc_name, + cmd: CLiCommand::Restart, + }, + ProcessAction::Freeze => InternalCli { + prc: proc_name, + cmd: CLiCommand::Freeze, + }, + 
ProcessAction::Unfreeze => InternalCli { + prc: proc_name, + cmd: CLiCommand::Unfreeze, + }, + /* TODO: ALL CMDS */ + _ => InternalCli { + prc: proc_name, + cmd: CLiCommand::Restart, + }, + }), + ); + let mut bus = bus_reciever.lock().await; + bus_sender.send(req).await?; + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move { + loop { + if let Ok(cont) = bus.try_recv() { + return cont; + } + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + } + }) + .await?; + + if let BusMessage::Response(_, _, content) = resp { + let content: Box = content; + if let Ok(resp) = content.downcast::>() { + return Ok((*resp)?); + } + } + Err(anyhow::Error::msg(format!( + "Unknown type of response from the Supervisor" + ))) + } + Commands::Status => Ok(String::from("Ok")), + Commands::Inspect(mode) => { + use crate::options::structs::bus::{BusMessageContentType, BusMessageDirection}; + let mode = mode.mode; + if let Ok(()) = bus_sender + .send(BusMessage::Request( + BusMessageDirection::ToMetrics, + BusMessageContentType::MetricsModeTransfered, + Box::new(mode), + )) + .await + { + let mut bus_reciever = bus_reciever.lock().await; + sleep(Duration::from_millis(300)).await; + let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move { + loop { + if let Ok(cont) = bus_reciever.try_recv() { + return cont; + } + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + } + }) + .await?; + if let BusMessage::Response(_, _, content) = resp { + let content: Box = content; + if let Ok(resp) = content.downcast::>() { + return Ok((*resp)?); + } + } + return Err(anyhow::Error::msg(format!( + "Unknown type of response from CLI" + ))); + } + Ok(String::from("Ok")) + } + _ => Ok(String::from("Ok")), + }; +} diff --git a/noxis-rs/src/options/config.rs b/noxis-rs/src/options/config.rs index 9e21042..e596b1f 100644 --- a/noxis-rs/src/options/config.rs +++ 
b/noxis-rs/src/options/config.rs @@ -1,84 +1,68 @@ +use super::preboot::PrebootParams; use super::structs::*; +use crate::utils::files::create_watcher; +use inotify::EventMask; use log::{error, info, warn}; use redis::{Client, Connection}; +use std::fs::File; use std::fs::OpenOptions; use std::io::Write; use std::os::unix::process::CommandExt; use std::process::Command; use std::sync::Arc; use std::{env, fs}; -use super::preboot::PrebootParams; -use tokio::time::{Duration, sleep}; -// use redis::PubSub; -use tokio::sync::{ +use tokio::sync::{ + broadcast::Receiver as BroadcastReceiver, + broadcast::Sender as BroadcastSender, oneshot, - oneshot::{ Receiver as OneShotReciever, Sender as OneShotSender }, - broadcast::Sender as BroadcastSender, broadcast::Receiver as BroadcastReceiver }; -use crate::utils::files::create_watcher; -use std::fs::File; -use inotify::EventMask; + oneshot::{Receiver as OneShotReciever, Sender as OneShotSender}, +}; +use tokio::time::{sleep, Duration}; // const CONFIG_PATH: &str = "settings.json"; pub mod v2 { - use std::path::PathBuf; use crate::utils::get_container_id; + use std::path::PathBuf; use super::*; pub async fn init_config_mechanism( - // to handle cli config changes + // to handle cli config changes cli_oneshot: OneShotReciever, - // to share local config with PRCS, CLI_PIPELINE and CONFIG modules - brd_tx : BroadcastSender, - // preboot params (args) - params : Arc - /*...*/ - ) { + // to share local config with PRCS, CLI_PIPELINE and CONFIG modules + brd_tx: BroadcastSender, + // preboot params (args) + params: Arc, /*...*/ + ) { // channel for pubsub to handle local config pulling let local_config_brd_reciever = brd_tx.subscribe(); - // channel between pub-sub mech and local config mech + // channel between pub-sub mech and local config mech let (tx_pb_lc, rx_pb_lc) = oneshot::channel::(); - // channel between cli mech and local config mech + // channel between cli mech and local config mech let (tx_cli_lc, rx_cli_lc) = 
oneshot::channel::(); // dbg!("before lc"); let params_clone = params.clone(); let for_lc_path = params.clone(); - let lc_path = for_lc_path - .config - .to_str() - .unwrap_or("settings.json"); + let lc_path = for_lc_path.config.to_str().unwrap_or("settings.json"); // future to init work with local config let lc_future = tokio::spawn( // let params = params.clone(); - local_config_reciever( - params_clone, - rx_pb_lc, - rx_cli_lc, - Arc::new(brd_tx) - ) + local_config_reciever(params_clone, rx_pb_lc, rx_cli_lc, Arc::new(brd_tx)), ); // dbg!("before pb"); // future to init work with pub sub mechanism - let pubsub_future = tokio::spawn( - pubsub_config_reciever( - tx_pb_lc, - params.clone(), - local_config_brd_reciever - ) - ); + let pubsub_future = tokio::spawn(pubsub_config_reciever( + tx_pb_lc, + params.clone(), + local_config_brd_reciever, + )); // dbg!("before cli"); // future to catch new configs from cli pipeline - let cli_future = tokio::spawn( - from_cli_config_reciever( - cli_oneshot, - tx_cli_lc - ) - - ); + let cli_future = tokio::spawn(from_cli_config_reciever(cli_oneshot, tx_cli_lc)); // let _ = lc_future.await; // dbg!("before select"); tokio::select! { @@ -107,7 +91,7 @@ pub mod v2 { Ok(res) => { if res.is_ok() { info!("New config was saved locally, restarting ..."); - } + } else { error!("Pubsub mechanism crushed, restarting ..."); } @@ -136,12 +120,15 @@ pub mod v2 { } // dbg!("after select"); // TODO! futures + select! [OK] - // TODO! tests config + // TODO! tests config } pub async fn get_redis_connection(params: &str) -> Option { for i in 1..=3 { let redis_url = format!("redis://{}/", params); - info!("Trying to connect Redis pubsub `{}`. Attempt {}", &redis_url, i); + info!( + "Trying to connect Redis pubsub `{}`. 
Attempt {}", + &redis_url, i + ); if let Ok(client) = Client::open(redis_url) { if let Ok(conn) = client.get_connection() { info!("Successfully opened Redis connection"); @@ -154,13 +141,13 @@ pub mod v2 { None } - // loop checking redis pubsub + // loop checking redis pubsub async fn pubsub_config_reciever( // to stop checking local config - local_conf_tx : OneShotSender, - params : Arc, - tx_brd_local : BroadcastReceiver, - ) -> anyhow::Result<()>{ + local_conf_tx: OneShotSender, + params: Arc, + tx_brd_local: BroadcastReceiver, + ) -> anyhow::Result<()> { /*...*/ // dbg!("start of pb"); let mut tx_brd_local = tx_brd_local; @@ -177,23 +164,28 @@ pub mod v2 { }; } }; - match get_redis_connection(&local_config.config_server).await { + match get_redis_connection(¶ms.remote_server_url).await { Some(mut conn) => { let mut pub_sub = conn.as_pubsub(); let channel_name = get_container_id().unwrap_or(String::from("default")); - let channel_name = channel_name.trim(); - match pub_sub.subscribe(channel_name) { + match pub_sub.subscribe(&channel_name) { Err(er) => { error!("Cannot subscribe pubsub channel due to {}", &er); - return Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er))) - }, + return Err(anyhow::Error::msg(format!( + "Cannot subscribe pubsub channel due to {}", + er + ))); + } Ok(_) => { - info!("Successfully subscribed to {} pubsub channel", channel_name); - let _ = pub_sub.set_read_timeout(Some(Duration::from_secs(3))); + info!( + "Successfully subscribed to {} pubsub channel", + &channel_name + ); + let _ = pub_sub.set_read_timeout(Some(Duration::from_secs(1))); loop { if let Ok(msg) = pub_sub.get_message() { // dbg!("ok on get message"); - let payload : Result = msg.get_payload(); + let payload: Result = msg.get_payload(); match payload { Err(_) => error!("Cannot read new config from Redis channel. 
Check network or Redis configuration "), Ok(payload) => { @@ -224,38 +216,35 @@ pub mod v2 { }, } } - // delay + // delay tokio::task::yield_now().await; } - }, + } } - }, + } None => { sleep(Duration::from_secs(20)).await; } } Ok(()) } - - // + + // async fn local_config_reciever( - params : Arc, - pubsub_oneshot : OneShotReciever, - cli_oneshot : OneShotReciever, - brd_tx : Arc>, + params: Arc, + pubsub_oneshot: OneShotReciever, + cli_oneshot: OneShotReciever, + brd_tx: Arc>, /*...*/ - ) -> anyhow::Result<()> { + ) -> anyhow::Result<()> { /*...*/ - // shadowing as mut - let mut pubsub_oneshot = pubsub_oneshot; + // shadowing as mut + let mut pubsub_oneshot = pubsub_oneshot; let mut cli_oneshot = cli_oneshot; // fill with default empty config, mut to change later let mut _current_config = Processes::default(); // PathBuf to &str to work with local config path as slice - let local_config_path = params - .config - .to_str() - .unwrap_or("settings.json"); + let local_config_path = params.config.to_str().unwrap_or("settings.json"); match load_processes(local_config_path) { // if local exists @@ -265,30 +254,30 @@ pub mod v2 { if let Err(er) = brd_tx.send(_current_config.clone()) { error!("Cannot share local config with broadcast due to {}", er); } - }, + } // if local is not exist None => { warn!("Local config wasn't found. Waiting for new ..."); return Err(anyhow::Error::msg("No local config")); // ... 
- }, + } } - // 100% local exists here + // 100% local exists here // create watcher on local config file match create_watcher("", local_config_path) { Ok(mut watcher) => { loop { - let mut need_to_export_config = false; + let mut need_to_export_config = false; // let mut need_to_recreate_watcher = false; - // return situations here - // 1) oneshot signal + // return situations here + // 1) oneshot signal // 2) if config was deleted -> recreate and fill with current config that is held here // 3) if config was changed -> fill with current config that is held here // catching signal from pubsub // it's because pubsub mech pulled new valid and actual config and now it's time to ... - // ... overwrite local config file and restart main thread + // ... overwrite local config file and restart main thread if let Ok(_) = pubsub_oneshot.try_recv() { sleep(Duration::from_secs(1)).await; return Ok(()); @@ -296,7 +285,7 @@ pub mod v2 { // catching signal from cli // it's because cli mech pulled new valid and actual config and now it's time to ... - // ... overwrite local config file and restart main thread (like in previous mechanism) + // ... overwrite local config file and restart main thread (like in previous mechanism) if let Ok(_) = cli_oneshot.try_recv() { sleep(Duration::from_secs(1)).await; return Ok(()); @@ -313,7 +302,7 @@ pub mod v2 { } else { // changes check let mut buffer = [0; 128]; - let events = watcher.read_events(&mut buffer); + let events = watcher.read_events(&mut buffer); if events.is_ok() { let events: Vec = events .unwrap() @@ -325,16 +314,14 @@ pub mod v2 { if !events.is_empty() { warn!("Local config file was overwritten. 
Discarding changes ..."); need_to_export_config = true; - // events - // .iter() - // .any(|event| *event == EventMask::DELETE_SELF) - // .then(|| need_to_recreate_watcher = true); } } } // exporting data if need_to_export_config { - if let Err(er) = export_saved_config_data_locally(¶ms.config, &_current_config).await { + if let Err(er) = + export_saved_config_data_locally(¶ms.config, &_current_config).await + { error!("Cannot save actual imported config due to {}", er); } else { // recreation watcher (draining activity buffer mechanism) @@ -349,66 +336,65 @@ pub mod v2 { sleep(Duration::from_millis(300)).await; // tokio::task::yield_now().await; } - }, + } Err(_) => { error!("Cannot create watcher on local config file `{}`. Deinitializing warding local config mechanism...", local_config_path); - return Err(anyhow::Error::msg("Cannot create watcher on local config file")); - }, + return Err(anyhow::Error::msg( + "Cannot create watcher on local config file", + )); + } } - } // [:IN-TEST] async fn from_cli_config_reciever( cli_oneshot: OneShotReciever, - to_local_tx: OneShotSender - ) -> Option { + to_local_tx: OneShotSender, + ) -> Option { /* match awaits til channel*/ // dbg!("start of cli"); loop { if !cli_oneshot.is_empty() { match cli_oneshot.await { Ok(config_from_cli) => { - info!("New actual config `{}` from CLI was pulled. Saving and restaring ...", &config_from_cli.date_of_creation); + info!( + "New actual config `{}` from CLI was pulled. 
Saving and restaring ...", + &config_from_cli.date_of_creation + ); let _ = to_local_tx.send(true); - return Some(config_from_cli) - }, + return Some(config_from_cli); + } _ => return None, } - } + } sleep(Duration::from_millis(300)).await; } - } async fn export_saved_config_data_locally( - config_file_path: &PathBuf, - current_config: &Processes + config_file_path: &PathBuf, + current_config: &Processes, ) -> anyhow::Result<()> { - let mut file = File::create(config_file_path)?; - file.write_all( - serde_json::to_string_pretty(current_config)?.as_bytes() - )?; + file.write_all(serde_json::to_string_pretty(current_config)?.as_bytes())?; Ok(()) // Ok(()) } } - /// # Fn `load_processes` -/// ## for reading and parsing *local* storing config -/// +/// ## for reading and parsing *local* storing config +/// /// *input* : `&str` /// -/// *output* : `None` if local conf file doesn't exist or invalid | `Some(conf)` on finish reading and parsing +/// *output* : `None` if local conf file doesn't exist or invalid | `Some(conf)` on finish reading and parsing /// /// *initiator* : func `get_actual_config` /// -/// *managing* : conf file name in `&str` format +/// *managing* : conf file name in `&str` format /// /// *depends on* : struct `Processes` -/// +/// fn load_processes(json_filename: &str) -> Option { if let Ok(res) = fs::read_to_string(json_filename) { if let Ok(conf) = serde_json::from_str::(&res) { @@ -418,262 +404,9 @@ fn load_processes(json_filename: &str) -> Option { None } -/// # Fn `get_actual_config` -/// ## for getting actual Monitor's config from local and remote storages -/// -/// *input* : - -/// -/// *output* : `None` on fatal error in mechanisms | `Some(conf)` on finish reading and parsing -/// -/// *initiator* : main thread -/// -/// *managing* : - -/// -/// *depends on* : struct `Processes` -/// -pub async fn get_actual_config(params : Arc) -> Option { - // * if no local conf -> loop and +inf getting conf from redis server - // * if local conf -> once 
getting conf from redis server - let config_path = params.config.to_str().unwrap_or_else(|| { - error!("Invalid character in config file. Config path was set to default"); - "settings.json" - }); - info!("Configurating config module with params: no-sub={}, local config path={:?}, remote server={}", params.no_sub, params.config, params.remote_server_url); - match load_processes(config_path) { - Some(local_conf) => { - info!( - "Found local configuration, version - {}", - &local_conf.date_of_creation - ); - if !params.no_sub { - if let Some(remote_conf) = - // TODO : rework with pubsub mech - once_get_remote_configuration(&format!("redis://{}/", ¶ms.remote_server_url)) - { - return match config_comparing(&local_conf, &remote_conf) { - ConfigActuality::Local => { - info!("Local config is actual"); - Some(local_conf) - } - ConfigActuality::Remote => { - info!("Pulled config is more actual. Saving changes!"); - if save_new_config(&remote_conf, config_path).is_err() { - error!("Saving changes process failed due to unexpected error...") - } - Some(remote_conf) - } - }; - } - } - Some(local_conf) - } - None => { - warn!("No local valid conf was found. Trying to pull remote one..."); - if !params.no_sub { - let mut conn = get_connection_watcher(&open_watcher(&format!("redis://{}/", ¶ms.remote_server_url))); - if let Some(conf) = get_remote_conf_watcher(&mut conn).await { - info!("Config {} was pulled from Redis-Server. 
Starting...", &conf.date_of_creation); - let _ = save_new_config(&conf, config_path); - return Some(conf); - } - } - None - } - } -} - -/// # Fn `get_remote_conf_watcher` -/// ## for infinitive pulling remote config -/// -/// *input* : `&mut Connection` -/// -/// *output* : `None` on fatal error | `Some(conf)` on succesfull pulling -/// -/// *initiator* : fn `get_actual_config` -/// -/// *managing* : mut ref `Connection` object -/// -/// *depends on* : struct `Processes` -/// -async fn get_remote_conf_watcher(conn : &mut Connection) -> Option { - let mut conn = conn.as_pubsub(); - let cont = crate::utils::get_container_id(); - loop { - match cont { - Some(ref cont) => { - let cont = cont.trim(); - if conn.subscribe(cont).is_err() { - // todo : delay - continue; - } - match conn.get_message() { - Ok(msg) => { - let msg: Result = msg.get_payload(); - if let Ok(payload) = msg { - if let Some(remote) = parse_extern_config(&payload) { - return Some(remote) - } - else { - error!("Pulled invalid config, cannot start. Waiting for remote conf..."); - } - } else { - error!("Cannot get Redis message payload. Waiting for remote conf..."); - } - // todo : delay - continue; - }, - Err(_) => { - // todo : delay - continue; - }, - } - }, - None => { - error!("Cannot get container id. 
Returning"); - break - }, - } - } - None -} - -/// # Fn `get_remote_conf_watcher` -/// ## for trying to pull remote config -/// -/// > only for situation when local isn't None (no need to fck redis server) -/// -/// *input* : `&str` -/// -/// *output* : `None` on empty pubsub or error | `Some(conf)` on succesfull pulling -/// -/// *initiator* : fn `get_actual_config` -/// -/// *managing* : &str of Redis Server credentials -/// -/// *depends on* : struct `Processes` -/// -fn once_get_remote_configuration(serv_info: &str) -> Option { - let cont = crate::utils::get_container_id(); - match Client::open(serv_info) { - Ok(client) => { - match client.get_connection() { - Ok(mut conn) => { - let mut conn = conn.as_pubsub(); - match conn.subscribe(cont) { - Ok(_) => { - if conn.set_read_timeout(Some(Duration::from_millis(100))).is_err() { - error!("Cannot set reading pubsub timeout and pull remote config"); - return None; - } - match conn.get_message() { - Ok(msg) => { - info!("Pulled config from Redis Server"); - let get_payload: Result = msg.get_payload(); - match get_payload { - Ok(payload) => { - let remote = parse_extern_config(&payload); - if remote.is_none() { - error!("Pulled config is invalid. Check it in Redis Server"); - } - remote - }, - Err(_) => { - error!("Cannot extract payload from new message. Check Redis Server state"); - None - }, - } - }, - Err(_) => { - None - }, - } - }, - Err(_) => { - error!("Redis subscription process failed. Check Redis configuration!"); - None - } - } - } - Err(_) => { - error!("Redis connection attempt is failed. Check Redis configuration!"); - None - } - } - } - Err(_) => { - error!("Redis-Client opening attempt is failed. Check network configuration!"); - None - } - } -} - -// ! 
watchers - -/// # Fn `open_watcher` -/// ## for infinitive opening Redis client -/// -/// > only for situation when local isn't None (no need to fck redis server) -/// -/// *input* : `Option` -/// -/// *output* : redis::Client on successful opening client -/// -/// *initiator* : fn `get_actual_config` -/// -/// *managing* : &str of Redis Server credentials -/// -/// *depends on* : struct `redis::Client` -/// -fn open_watcher(serv_info: &str) -> Client { - loop { - match Client::open(serv_info) { - Ok(redis) => { - info!("Successfully opened Redis-Client"); - return redis; - } - Err(_) => { - error!("Redis-Client opening attempt is failed. Check network configuration! Retrying..."); - std::thread::sleep(Duration::from_secs(4)); - } - } - } -} - -/// # Fn `get_connection_watcher` -/// ## for infinitive establishing Redis connection on existing client -/// -/// > only for situation when local isn't None (no need to fck redis server) -/// -/// *input* : `&Client` -/// -/// *output* : `Connection` -/// -/// *initiator* : fn `get_actual_config` -/// -/// *managing* : &Client for opening connection -/// -/// *depends on* : struct `redis::Connection` -/// -fn get_connection_watcher(client: &Client) -> Connection { - loop { - match client.get_connection() { - Ok(conn) => { - info!("Successfully got Redis connection object"); - return conn; - } - Err(_) => { - error!( - "Redis connection attempt is failed. Check Redis configuration! Retrying..." 
- ); - std::thread::sleep(Duration::from_secs(4)); - } - } - } -} - /// # Fn `restart_main_thread` /// ## for restart monitor with new config -/// +/// /// *input* : - /// /// *output* : `Ok(())` on valid restart | `Err(er)` on error @@ -683,93 +416,16 @@ fn get_connection_watcher(client: &Client) -> Connection { /// *managing* : - /// /// *depends on* : - -/// +/// fn restart_main_thread() -> std::io::Result<()> { let current_exe = env::current_exe()?; - Command::new(current_exe).exec(); + let _ = Command::new(current_exe).exec(); Ok(()) } -/// # Fn `subscribe_config_stream` -/// ## for subscribe on changes, pulling to Redis pubsub to get more actual config -/// -/// *input* : `Arc` -/// -/// *output* : `Ok(())` on end of work | `Err(er)` on error with subscribing mechanism -/// -/// *initiator* : fn `subscribe_config_stream` -/// -/// *managing* : `Arc` to compare old config with new pulled -/// -/// *depends on* : `Processes` -/// -pub async fn subscribe_config_stream(actual_prcs: Arc, params: Arc) -> Result<(), CustomError> { - let config_path = params.config.to_str().unwrap_or_else(|| "settings.json"); - - if params.no_sub { - return Err(CustomError::Fatal); - } - if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) { - if let Ok(mut conn) = client.get_connection() { - match crate::utils::get_container_id() { - Some(channel_name) => { - let channel_name = channel_name.trim(); - let mut pubsub = conn.as_pubsub(); - if pubsub.subscribe(&channel_name).is_ok() { - info!("Runner subscribed on config update publishing in channel {}", &channel_name); - loop { - if let Ok(msg) = pubsub.get_message() { - let get_remote_config: Result = msg.get_payload(); - match get_remote_config { - Ok(payload) => { - if let Some(remote_config) = parse_extern_config(&payload) { - match config_comparing(&actual_prcs, &remote_config) { - ConfigActuality::Remote => { - warn!("Pulled config is actual. 
Saving and restarting..."); - if save_new_config(&remote_config, config_path).is_err() { - error!("Error with saving new config to {}. Stopping sub mechanism...", config_path); - return Err(CustomError::Fatal); - } - if restart_main_thread().is_err() { - error!("Error with restarting Runner. Stopping sub mechanism..."); - return Err(CustomError::Fatal); - } - } - _ => { - warn!("Pulled new config. Current config is more actual ..."); - continue - }, - } - } - else { - error!("Invalid conig was pulled"); - } - }, - Err(_) => { - error!("Cannot extract new config from message"); - break; - }, - } - } - sleep(Duration::from_secs(30)).await; - } - } else { - error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name); - } - }, - None => { - error!("Cannot get channel name"); - } - } - } - } - error!("Error with subscribing Redis stream on update. Working only with selected config..."); - Err(CustomError::Fatal) -} - /// # Fn `config_comparing` /// ## for compare old and new configs -/// +/// /// *input* : local: `&Processes`, remote: `&Processes` /// /// *output* : `ConfigActuality::Local` or `ConfigActuality::Remote` @@ -779,8 +435,8 @@ pub async fn subscribe_config_stream(actual_prcs: Arc, params: Arc ConfigActuality { +/// +pub fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActuality { if local.is_default() { return ConfigActuality::Remote; } @@ -793,17 +449,9 @@ fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActuality { } } -// ! TEMPORARILY DEPRECATED ! 
-// fn native_date_from_millis(mls: &str) -> Option> { -// match mls.parse::(){ -// Ok(val) => return chrono::DateTime::from_timestamp_millis(val), -// Err(_) => return None, -// } -// } - /// # Fn `save_new_config` /// ## mechanism for saving new config in local storage -/// +/// /// *input* : `&Processes`, `&str` /// /// *output* : `Ok(())` on succesfull saving | Err(er) on fs error @@ -813,7 +461,7 @@ fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActuality { /// *managing* : new config object: `&Processes` and config file name: `&str` /// /// *depends on* : `Processes` -/// +/// fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomError> { match serde_json::to_string_pretty(&config) { // Ok(st) => match fs::write(config_file, st) { @@ -844,7 +492,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr /// # Fn `parse_extern_config` /// ## for parsing &str to Processes -/// +/// /// *input* : `&str` /// /// *output* : parsed config in Some(Processes) | None on error with parsing @@ -854,7 +502,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr /// *managing* : unparsed config `&str` /// /// *depends on* : `Processes` -/// +/// fn parse_extern_config(json_string: &str) -> Option { if let Ok(des) = serde_json::from_str::(json_string) { return Some(des); @@ -879,28 +527,20 @@ mod config_unittests { // old one (kinda local) let a = Processes { date_of_creation: String::from("1"), - config_server: String::new(), processes: vec![], }; // new one (kinda remote) let b = Processes { date_of_creation: String::from("2"), - config_server: String::new(), processes: vec![], }; assert_eq!(config_comparing(&a, &b), ConfigActuality::Remote); } - // TODO : strange output - // #[test] - // fn get_actual_config_mechanism() { - // assert!(get_actual_config().is_some()) - // } #[test] fn save_config() { let a = Processes { date_of_creation: String::from("1"), - 
config_server: String::new(), processes: vec![], }; assert!(save_new_config(&a, "tests/examples/save-conf.json").is_ok()); @@ -910,7 +550,6 @@ mod config_unittests { fn save_to_zero_file() { let a = Processes { date_of_creation: String::from("1"), - config_server: String::new(), processes: vec![], }; assert!(save_new_config(&a, "tests/examples/none.json").is_ok()); diff --git a/noxis-rs/src/options/logger.rs b/noxis-rs/src/options/logger.rs index 14cd92c..8332b27 100644 --- a/noxis-rs/src/options/logger.rs +++ b/noxis-rs/src/options/logger.rs @@ -15,7 +15,7 @@ use crate::utils::get_container_id; /// # Fn `setup_logger` /// ## for initializing process of unstoppable grubbing metrics. -/// +/// /// *input* : `Result<()>` /// /// *output* : `Err` if it cant create logger | `Ok` after logger initialing @@ -25,7 +25,7 @@ use crate::utils::get_container_id; /// *managing* : - /// /// *depends on* : - -/// +/// pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> { // if Command::new("sh").args(["-c", "mkdir logs"]).output().is_err() { // println!("Error: Cannot init logs directory"); @@ -49,7 +49,7 @@ pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> { record.args(), ) }) - .filter(None, LevelFilter::Info) + .filter(None, LevelFilter::from_env()) .target(env_logger::Target::Stdout) // temporary deprecated // .target(env_logger::Target::Pipe(log_target)) @@ -58,6 +58,27 @@ pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> { Ok(()) } +trait FromEnv { + fn from_env() -> LevelFilter; +} + +impl FromEnv for LevelFilter { + fn from_env() -> LevelFilter { + return match std::env::var("NOXIS_MAX_LOG_LEVEL") { + Ok(var) => match var.to_ascii_lowercase().trim().as_ref() { + "trace" => LevelFilter::Trace, + "debug" => LevelFilter::Debug, + "info" => LevelFilter::Info, + "error" => LevelFilter::Error, + "warn" => LevelFilter::Warn, + "off" => LevelFilter::Off, + _ => LevelFilter::Info, + }, + Err(_) => 
LevelFilter::Info, + }; + } +} + #[cfg(test)] mod logger_tests { use super::*; diff --git a/noxis-rs/src/options/preboot.rs b/noxis-rs/src/options/preboot.rs index d21cd57..22eab0b 100644 --- a/noxis-rs/src/options/preboot.rs +++ b/noxis-rs/src/options/preboot.rs @@ -1,118 +1,51 @@ -// module to handle pre-boot params of the monitor +//! +//! Module to handle `pre-boot params` of the monitor (calling also as `settings`) +//! #[allow(unused_imports)] -use anyhow::{Result, Ok, Error}; -use clap::Parser; -use std::path::PathBuf; -use std::env::var; +use anyhow::{Error, Result}; use dotenv::dotenv; - -const SOCKET_PATH: &str = "/var/run/enode/hostagent.sock"; - -/// -enum EnvVars { - NoxisNoHagent, - NoxisNoLogs, - NoxisRefreshLogs, - // NoxisNoRemoteConfig, - NoxisNoConfigSub, - NoxisSocketPath, - NoxisLogTo, - NoxisRemoteServerUrl, - NoxisConfig, - NoxisMetrics, -} - -/// -impl std::fmt::Display for EnvVars { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - EnvVars::NoxisNoHagent => write!(f, "NOXIS_NO_HAGENT"), - EnvVars::NoxisNoLogs => write!(f, "NOXIS_NO_LOGS"), - EnvVars::NoxisRefreshLogs => write!(f, "NOXIS_REFRESH_LOGS"), - // EnvVars::NoxisNoRemoteConfig => write!(f, "NOXIS_NO_REMOTE_CONFIG"), - EnvVars::NoxisNoConfigSub => write!(f, "NOXIS_NO_CONFIG_SUB"), - EnvVars::NoxisSocketPath => write!(f, "NOXIS_SOCKET_PATH"), - EnvVars::NoxisLogTo => write!(f, "NOXIS_LOG_TO"), - EnvVars::NoxisRemoteServerUrl => write!(f, "NOXIS_REMOTE_SERVER_URL"), - EnvVars::NoxisConfig => write!(f, "NOXIS_CONFIG"), - EnvVars::NoxisMetrics => write!(f, "NOXIS_METRICS"), - } - } -} - -/// -impl<'a> EnvVars { - // Default trait func is not satisfying this issue - fn default(self) -> &'a str { - match self { - EnvVars::NoxisNoHagent => "false", - EnvVars::NoxisNoLogs => "false", - EnvVars::NoxisRefreshLogs => "false", - // EnvVars::NoxisNoRemoteConfig => "false", - EnvVars::NoxisNoConfigSub => "false", - EnvVars::NoxisSocketPath => 
"/var/run/enode/hostagent.sock", - EnvVars::NoxisLogTo => "./", - EnvVars::NoxisRemoteServerUrl => "localhost", - EnvVars::NoxisConfig => "./settings.json", - EnvVars::NoxisMetrics => "full", - } - } - fn process_env_var(self, preboot_value: &str) { - // let default = self.default(); - match var(self.to_string()) { - std::result::Result::Ok(val) => { - if val != preboot_value { - std::env::set_var(self.to_string(), self.default()); - } - }, - Err(_) => { - std::env::set_var(self.to_string(), preboot_value); - }, - } - } - pub fn setup(preboot: &PrebootParams) { - // setup default if not exists - // check values and save preboot states in env vars if not equal - - Self::NoxisNoHagent.process_env_var(&preboot.no_hostagent.to_string()); - Self::NoxisNoLogs.process_env_var(&preboot.no_logs.to_string()); - Self::NoxisRefreshLogs.process_env_var(&preboot.refresh_logs.to_string()); - // Self::NoxisNoRemoteConfig.process_env_var(&preboot.no_remote_config.to_string()); - Self::NoxisNoConfigSub.process_env_var(&preboot.no_sub.to_string()); - Self::NoxisSocketPath.process_env_var(preboot.socket_path.to_str().unwrap()); - Self::NoxisLogTo.process_env_var(preboot.log_to.to_str().unwrap()); - Self::NoxisRemoteServerUrl.process_env_var(&preboot.remote_server_url); - Self::NoxisConfig.process_env_var(preboot.config.to_str().unwrap()); - Self::NoxisMetrics.process_env_var(&preboot.metrics.to_string()); - - } -} +use log::warn; +use std::env::var; +use std::path::PathBuf; /// # Enum `MetricsPrebootParams` -/// ## for setting up metrics mode as preboot param from command prompt -/// +/// ## for setting up metrics mode as preboot param from command prompt +/// /// examples: /// ``` bash -/// noxis-rs ... --metrics full -/// noxis-rs ... --metrics system +/// noxis-rs ... --metrics full +/// noxis-rs ... --metrics system /// noxis-rs ... --metrics processes /// noxis-rs ... --metrics net -/// noxis-rs ... --metrics none +/// noxis-rs ... 
--metrics none /// ``` -/// -#[derive(clap::ValueEnum, Debug, Clone)] +/// +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub enum MetricsPrebootParams { - Full, + Full, System, Processes, Net, None, } +impl MetricsPrebootParams { + fn from_env(var: &str) -> Self { + match var.trim().to_lowercase().as_str() { + "full" => Self::Full, + "system" => Self::System, + "processes" => Self::Processes, + "net" => Self::Net, + "none" => Self::None, + _ => Self::Full, + } + } +} + /// # `std::fmt::Display` implementation for `MetricsPrebootParams` -/// ## to enable parsing object to String -impl std::fmt::Display for MetricsPrebootParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { +/// ## to enable parsing object to String +impl std::fmt::Display for MetricsPrebootParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { MetricsPrebootParams::Full => write!(f, "full"), MetricsPrebootParams::System => write!(f, "system"), @@ -120,260 +53,317 @@ impl std::fmt::Display for MetricsPrebootParams { MetricsPrebootParams::Net => write!(f, "net"), MetricsPrebootParams::None => write!(f, "none"), } - } -} + } +} -/// # struct `PrebootParams` -/// ## to parse and set up all modes as preboot params from command prompt -/// -/// ### args : -/// -/// `--no-hagent` - to disable hagent work module and set up work mode as autonomous -/// ### usage : -/// ``` bash -/// noxis-rs ... --no-hagent ... +/// struct to handle Noxis settings (that were set as `.env` or global env vars) +/// +/// to parse and set up all modes as preboot params from `.env` or using `export $VAR $VAL` command +/// +/// # Settings : +/// +/// All settings are divided by `actions` and `values`. +/// 1. `Actions` - true or false on var exist check +/// > `Actions` is not about values, it can handle any data +/// +/// 2. `Values` - classic string-driven environment variables +/// +/// # `Actions` vars +/// +/// 1. 
`NOXIS_NO_HAGENT` - to disable hagent work module and set up work mode as autonomous +/// +/// **usage** : +/// ``` toml +/// ... +/// NOXIS_NO_HAGENT = "random-text" +/// ... /// ``` -/// -/// -/// `--no-logs` - to disable logging at all -/// ### usage : +/// or /// ``` bash -/// noxis-rs ... --no-logs ... -/// ``` -/// -/// `--refresh-logs` - to truncate logs directory -/// ### usage : -/// ``` bash -/// noxis-rs ... --refresh-logs ... -/// ``` -/// -/// `--no-sub` - to disable Redis subscribtion mechanism -/// ### usage : -/// ``` bash -/// noxis-rs ... --no-sub ... -/// ``` -/// -/// `--socket-path` - to set Unix Domain Socket file's directory -/// ### usage : -/// ``` bash -/// noxis-rs ... --socket-path /var/run/enode/hostagent.sock ... +/// export NOXIS_NO_HAGENT "random-text" /// ``` /// -/// `--log-to` - to set directory for logs -/// ### usage : -/// ``` bash -/// noxis-rs ... --log-to /dir/to/logs/ ... +/// +/// 2. `NOXIS_NO_LOGS` - to disable logging at all +/// **usage** : +/// ``` toml +/// ... +/// NOXIS_NO_LOGS = "random-text" +/// ... /// ``` -/// -/// `--remote-server-url` - to set Redis Server -/// ### usage : +/// or /// ``` bash -/// noxis-rs ... --remote-server-url 192.168.28.12 ... +/// export NOXIS_NO_LOGS "random-text" /// ``` -/// -/// `--config` - to set Noxis' config full path +/// +/// 3. `NOXIS_REFRESH_LOGS` - to truncate logs directory +/// /// ### usage : -/// ``` bash -/// noxis-rs ... --config /etc/enode/settings.json ... +/// ``` toml +/// ... +/// NOXIS_REFRESH_LOGS = "random-text" +/// ... /// ``` -/// -/// `--metrics` - to set metrics mode +/// or +/// ``` bash +/// export NOXIS_REFRESH_LOGS "random-text" +/// ``` +/// +/// 4. `NOXIS_NO_SUB` - to disable Redis subscribtion mechanism +/// /// ### usage : -/// ``` bash -/// noxis-rs ... --metrics full ... 
+/// ``` toml +/// NOXIS_NO_SUB = "random-text" /// ``` -#[derive(Debug, Parser)] +/// or +/// ``` bash +/// export NOXIS_NO_SUB "random-text" +/// ``` +/// +/// # `Values` vars +/// +/// 1. `NOXIS_HAGENT_SOCKET_PATH` - to set Unix Domain Socket file's directory +/// +/// ### usage : +/// ``` toml +/// NOXIS_HAGENT_SOCKET_PATH = "/var/run/example/hostagent.sock" +/// ``` +/// or +/// ``` bash +/// export NOXIS_HAGENT_SOCKET_PATH "/var/run/example/hostagent.sock" +/// ``` +/// +/// 2 `NOXIS_LOG_TO` - to set directory for logs +/// +/// ### usage : +/// ``` toml +/// NOXIS_LOG_TO = "/var/log/noxis/noxis.log" +/// ``` +/// or +/// ``` bash +/// export NOXIS_LOG_TO "/var/log/noxis/noxis.log" +/// ``` +/// +/// 3. `NOXIS_REMOTE_SERVER_URL` - to set Redis Server +/// +/// ### usage : +/// ``` toml +/// NOXIS_REMOTE_SERVER_URL = "ip.ip.ip.ip:port" +/// ``` +/// or +/// ``` bash +/// export NOXIS_REMOTE_SERVER_URL "ip.ip.ip.ip:port" +/// ``` +/// +/// 4. `NOXIS_CONFIG_PATH` - to set Noxis' config full path +/// +/// ### usage : +/// ``` toml +/// NOXIS_CONFIG_PATH = "./settings.json" +/// ``` +/// or +/// ``` bash +/// export NOXIS_CONFIG_PATH "./settings.json" +/// ``` +/// +/// 5. 
`NOXIS_METRICS_MODE` - to set metrics mode +/// +/// ### usage : +/// ``` toml +/// NOXIS_METRICS_MODE = "full" +/// ``` +/// or +/// ``` bash +/// export NOXIS_METRICS_MODE "full" +/// ``` +/// +#[derive(Debug, serde::Serialize, serde::Deserialize)] pub struct PrebootParams { - // actions - #[arg( - long = "no-hagent", - action, - conflicts_with="socket_path", - help="To disable work with host-agent" - )] - pub no_hostagent : bool, - #[arg( - long = "no-logs", - action, - conflicts_with="log_to", - help="To disable logs" - )] + // pub no_hostagent : bool, pub no_logs: bool, - #[arg( - long = "refresh-logs", - action, - conflicts_with="no_logs", - help="To clear logs directory" - )] - pub refresh_logs : bool, - // #[arg( - // long = "no-remote-config", - // action, - // help="To disable work with remote config server", - // conflicts_with="no_sub")] - // pub no_remote_config : bool, - #[arg( - long = "no-sub", - action, - help="To disable Redis subscription mechanism", - )] - // conflicts_with="no_remote_config" - pub no_sub : bool, - - // params (socket_path, log_to, remote_server_url, config) - #[arg( - long = "socket-path", - default_value="/var/run/enode/hostagent.sock", - conflicts_with="no_hostagent", - help="To set .sock file's path used in communication with host-agent" - )] - pub socket_path : PathBuf, - #[arg( - long = "log-to", - default_value="./", - conflicts_with="no_logs", - help="To set a path to logs directory" - )] - pub log_to : PathBuf, - #[arg( - long = "remote-server-url", - default_value="localhost", - conflicts_with="no_sub", - help = "To set url of remote config server using in remote config pulling mechanism" - )] - pub remote_server_url : String, - #[arg( - long = "config", - short, - default_value="settings.json", - help="To set local config file path" - )] - pub config : PathBuf, - - // value enum params (metrics) - #[arg( - long = "metrics", - short, - default_value_t=MetricsPrebootParams::Full, - help="To set metrics grubbing mode" - 
)] + pub refresh_logs: bool, + pub no_sub: bool, + // pub socket_path : PathBuf, + pub log_to: PathBuf, + pub remote_server_url: String, + pub config: PathBuf, pub metrics: MetricsPrebootParams, + pub self_socket: PathBuf, + pub backup_folder: PathBuf, } /// # implementation for `MetricsPrebootParams` /// ## to enable validation mechanism impl PrebootParams { - pub fn validate(mut self) -> Result { + pub fn validate() -> Self { dotenv().ok(); - if !self.socket_path.exists() && !self.no_hostagent { - if self.socket_path.to_string_lossy() == SOCKET_PATH { - self.no_hostagent = true; - eprintln!("Warning: Socket-file wasn't found. Working without hostagent module..."); - } else { - eprintln!("Warning: Socket-file wasn't found or Noxis can't read it. Socket-file was set to default"); - if !PathBuf::from(SOCKET_PATH).exists() { - self.no_hostagent = true; - eprintln!("Warning: Socket-file wasn't found. Working without hostagent module..."); - } else { - self.socket_path = PathBuf::from(SOCKET_PATH); + Self { + // bool + // no_hostagent : { + // match var("NOXIS_NO_HAGENT") { + // Ok(_) => true, + // Err(_) => false, + // } + // }, + no_logs: { + match var("NOXIS_NO_LOGS") { + Ok(_) => true, + Err(_) => false, } - } - // return Err(Error::msg("Socket-file not found or Noxis can't read it. 
Cannot start")); + }, + refresh_logs: { + match var("NOXIS_REFRESH_LOGS") { + Ok(_) => true, + Err(_) => false, + } + }, + no_sub: { + match var("NOXIS_NO_SUB") { + Ok(_) => true, + Err(_) => false, + } + }, + // vals + // socket_path : { + // match var("NOXIS_HAGENT_SOCKET_PATH") { + // Ok(val) => PathBuf::from(val), + // Err(_) => PathBuf::from("/var/run/enode/hostagent.sock"), + // } + // }, + log_to: { + match var("NOXIS_LOG_TO") { + Ok(val) => PathBuf::from(val), + Err(_) => PathBuf::from("./"), + } + }, + remote_server_url: { + match var("NOXIS_REMOTE_SERVER_URL") { + Ok(val) => val, + Err(_) => String::from("localhost"), + } + }, + config: { + match var("NOXIS_CONFIG_PATH") { + Ok(val) => PathBuf::from(val), + Err(_) => PathBuf::from("./settings.json"), + } + }, + metrics: { + match var("NOXIS_METRICS_MODE") { + Ok(val) => MetricsPrebootParams::from_env(&val), + Err(_) => MetricsPrebootParams::Full, + } + }, + self_socket: { + match var("NOXIS_SOCKET_PATH") { + Ok(val) => PathBuf::from(val), + Err(_) => { + let default = std::env::current_dir() + .expect("Crushed on getting current_dir path. Check fs state!"); + warn!( + "$NOXIS_SOCKET_PATH wans't set. Default value - {}", + default.display() + ); + PathBuf::from(default) + } + } + }, + backup_folder: { + match var("NOXIS_BACKUP_FOLDER") { + Ok(val) => { + let path = PathBuf::from(val); + if path.exists() && path.is_dir() { + path + } else { + PathBuf::from(std::env::current_dir() + .expect("Crushed on getting current_dir path. Check fs state!") + ) + } + }, + Err(_) => { + let default = std::env::current_dir() + .expect("Crushed on getting current_dir path. Check fs state!"); + warn!( + "$NOXIS_BACKUP_FOLDER wans't set. Default value - {}", + default.display() + ); + PathBuf::from(default) + } + } + }, } - // existing log dir - if !self.log_to.exists() && !self.no_logs { - eprintln!("Error: Log-Dir not found or Noxis can't read it. 
LogDir was set to default"); - self.refresh_logs = false; - self.log_to = PathBuf::from("./"); - // return Err(Error::msg("Log Directory Not Found or Noxis can't read it. Cannot start")); - } - // existing sock file - if !self.config.exists() { - eprintln!("Error: Invalid character in config file. Config path was set to default"); - // TODO : ??? wtf is going with 2 paths - let config = PathBuf::from("/etc/enode/noxis/settings.json"); - if !config.exists() && self.no_sub { - return Err(Error::msg("Noxis cannot run without config. Create local config or enable pubsub mechanism")); - } - self.config = PathBuf::from("settings.json"); - // return Err(Error::msg("Local Config Not Found or Noxis can't read it. Cannot start")); - } - // redis server check - EnvVars::setup(&self); - Ok(self) } } - // unit tests of preboot params parsing mech -#[cfg(test)] -mod preboot_unitests{ - use super::*; +// #[cfg(test)] +// mod preboot_unitests{ +// use super::*; - #[test] - fn parsing_zero_args() { - assert!(PrebootParams::try_parse_from(vec!["runner-rs"]).is_ok()) - } - #[test] - fn parsing_hagent_valid_args() { - assert!(PrebootParams::try_parse_from(vec![ - "runner-rs", - "--socket-path", "/path/to/socket" - ]).is_ok()) -} - #[test] - fn parsing_hagent_invalid_args() { - assert!(PrebootParams::try_parse_from(vec![ - "runner-rs", - "--socket-path", "/path/to/socket", - "--no-hagent" - ]).is_err()) - } - #[test] - fn parsing_log_valid_args() { - assert!(PrebootParams::try_parse_from(vec![ - "runner-rs", - "--log-to", "/path/to/log/dir" - ]).is_ok()) - } - #[test] - fn parsing_log_invalid_args() { - assert!(PrebootParams::try_parse_from(vec![ - "runner-rs", - "--log-to /path/to/log/dir", - "--no-logs" - ]).is_err()) - } - #[test] - fn parsing_config_valid_args() { - assert!(PrebootParams::try_parse_from(vec![ - "runner-rs", - "--no-sub", - "--remote-server-url", "redis://127.0.0.1" - ]).is_err()) - } - // #[test] - // fn parsing_config_invalid_args_noremote_nosub() { - // 
assert!(PrebootParams::try_parse_from(vec![ - // "runner-rs", - // "--no-remote-config", "--no-sub" - // ]).is_err()) - // } - #[test] - fn parsing_config_invalid_args_noremote_remoteurl() { - assert!(PrebootParams::try_parse_from(vec![ - "runner-rs", - "--no-sub", - "--remote-server-url", "redis://127.0.0.1" - ]).is_err()) - } - #[test] - fn parsing_metrics_args_using_value_enum() { - assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "full"]).is_ok()); - assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "system"]).is_ok()); - assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "processes"]).is_ok()); - assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "net"]).is_ok()); - assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "none"]).is_ok()); - assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "unusual_value"]).is_err()); - } -} \ No newline at end of file +// #[test] +// fn parsing_zero_args() { +// assert!(PrebootParams::try_parse_from(vec!["runner-rs"]).is_ok()) +// } +// #[test] +// fn parsing_hagent_valid_args() { +// assert!(PrebootParams::try_parse_from(vec![ +// "runner-rs", +// "--socket-path", "/path/to/socket" +// ]).is_ok()) +// } +// #[test] +// fn parsing_hagent_invalid_args() { +// assert!(PrebootParams::try_parse_from(vec![ +// "runner-rs", +// "--socket-path", "/path/to/socket", +// "--no-hagent" +// ]).is_err()) +// } +// #[test] +// fn parsing_log_valid_args() { +// assert!(PrebootParams::try_parse_from(vec![ +// "runner-rs", +// "--log-to", "/path/to/log/dir" +// ]).is_ok()) +// } +// #[test] +// fn parsing_log_invalid_args() { +// assert!(PrebootParams::try_parse_from(vec![ +// "runner-rs", +// "--log-to /path/to/log/dir", +// "--no-logs" +// ]).is_err()) +// } +// #[test] +// fn parsing_config_valid_args() { +// assert!(PrebootParams::try_parse_from(vec![ +// "runner-rs", +// "--no-sub", +// "--remote-server-url", "redis://127.0.0.1" 
+// ]).is_err()) +// } +// // #[test] +// // fn parsing_config_invalid_args_noremote_nosub() { +// // assert!(PrebootParams::try_parse_from(vec![ +// // "runner-rs", +// // "--no-remote-config", "--no-sub" +// // ]).is_err()) +// // } +// #[test] +// fn parsing_config_invalid_args_noremote_remoteurl() { +// assert!(PrebootParams::try_parse_from(vec![ +// "runner-rs", +// "--no-sub", +// "--remote-server-url", "redis://127.0.0.1" +// ]).is_err()) +// } +// #[test] +// fn parsing_metrics_args_using_value_enum() { +// assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "full"]).is_ok()); +// assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "system"]).is_ok()); +// assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "processes"]).is_ok()); +// assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "net"]).is_ok()); +// assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "none"]).is_ok()); +// assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "unusual_value"]).is_err()); +// } +// } diff --git a/noxis-rs/src/options/signals.rs b/noxis-rs/src/options/signals.rs index f840510..6180ff6 100644 --- a/noxis-rs/src/options/signals.rs +++ b/noxis-rs/src/options/signals.rs @@ -1,4 +1,3 @@ -use super::structs::CustomError; use std::sync::Arc; use tokio::io; use tokio::sync::mpsc; @@ -11,17 +10,17 @@ type SendersVec = Arc>>>; /// # Fn set_valid_destructor /// ## for initializing process of unstoppable grubbing metrics. 
-/// +/// /// *input* : `Result<()>` /// -/// *output* : `Err` if it cant create signals listeners | `Ok` on returning Monitor +/// *output* : `Err` if it cant create signals listeners | `Ok` on returning Monitor /// /// *initiator* : main thread /// /// *managing* : `Arc>>>` /// /// *depends on* : Sig, Signals -/// +/// pub async fn set_valid_destructor(senders: SendersVec) -> anyhow::Result<()> { let (mut int, mut term, mut stop) = ( Sig::new(Signals::Sigint, senders.clone()), @@ -38,9 +37,9 @@ pub async fn set_valid_destructor(senders: SendersVec) -> anyhow::Result<()> { } /// # Enum Signals /// ## for instancing each managed system signals (such as SIGINT) -/// +/// /// > (element needed in Sig constructor's signature) -/// +/// /// *depends on* : - enum Signals { Sigint, @@ -50,9 +49,9 @@ enum Signals { /// # Struct Signals /// ## for instancing each managed system signals (such as SIGINT) -/// +/// /// > (needed to construct system signals listener) -/// +/// /// *depends on* : Signals struct Sig { signal: Signal, @@ -70,7 +69,7 @@ impl Sig { } } /// ## trait Display realization for returning String-name of signal -/// +/// /// > (needed in logs) impl std::fmt::Display for Signals { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -91,20 +90,20 @@ impl Signals { } } } -/// # Trait SigPostProcessing +/// # Trait SigPostProcessing /// ## to handle post-processing jobs after getting system signal -/// +/// /// ## > (needed in signals post-processing) -/// +/// trait SigPostProcessing { async fn post_processing(&mut self) -> io::Result<()>; } -/// # Trait SigPostProcessing realization for Sig struct +/// # Trait SigPostProcessing realization for Sig struct /// ## to deinitialize Monitor correctly after getting signal -/// +/// /// ## > (needed in signals post-processing) -/// +/// impl SigPostProcessing for Sig { async fn post_processing(&mut self) -> io::Result<()> { // manipulations ... 
diff --git a/noxis-rs/src/options/structs.rs b/noxis-rs/src/options/structs.rs index 56be896..46c3c23 100644 --- a/noxis-rs/src/options/structs.rs +++ b/noxis-rs/src/options/structs.rs @@ -1,21 +1,131 @@ #![allow(dead_code)] -use std::net::Ipv4Addr; -use serde::{Deserialize, Serialize}; use async_trait::async_trait; +use serde::{Deserialize, Serialize}; +use std::any::Any; +use std::net::Ipv4Addr; use std::sync::Arc; +pub mod bus { + use std::fmt::Debug; + + use super::*; + use crate::utils::metrics::processes::ProcessesQuery; + use crate::utils::metrics::MetricsExportable; + use noxis_cli::{metrics_models::MetricsMode, Cli}; + pub type BusMessageContent = Box; + + #[derive(Debug)] + pub enum BusMessage { + Request( + BusMessageDirection, + BusMessageContentType, + BusMessageContent, + ), + Response( + BusMessageDirection, + BusMessageContentType, + BusMessageContent, + ), + } + + #[derive(Debug)] + pub enum BusMessageDirection { + ToCli, + ToSupervisor, + ToMetrics, + } + + #[derive(Debug)] + pub enum BusMessageContentType { + RawString, + Cli, + MetricsObj, + Result, + MetricsModeTransfered, + ProcessQuery, + } + + #[derive(Debug)] + pub enum CLiCommand { + Start, + Stop, + Restart, + Freeze, + Unfreeze, + } + + #[derive(Debug)] + pub struct InternalCli { + pub prc: String, + pub cmd: CLiCommand, + } + + pub trait BusContent: Send + Sync + 'static + Debug + Any { + fn get_bus_type(&self) -> BusMessageContentType; + } + impl BusContent for anyhow::Result { + fn get_bus_type(&self) -> BusMessageContentType { + BusMessageContentType::Result + } + } + impl BusContent for String { + fn get_bus_type(&self) -> BusMessageContentType { + BusMessageContentType::RawString + } + } + impl BusContent for Cli { + fn get_bus_type(&self) -> BusMessageContentType { + BusMessageContentType::Cli + } + } + impl BusContent for InternalCli { + fn get_bus_type(&self) -> BusMessageContentType { + BusMessageContentType::Cli + } + } + impl BusContent for dyn MetricsExportable { + fn 
get_bus_type(&self) -> BusMessageContentType { + BusMessageContentType::MetricsObj + } + } + impl BusContent for Box { + fn get_bus_type(&self) -> BusMessageContentType { + BusMessageContentType::MetricsObj + } + } + impl BusContent for MetricsMode { + fn get_bus_type(&self) -> BusMessageContentType { + BusMessageContentType::MetricsModeTransfered + } + } + impl BusContent for ProcessesQuery { + fn get_bus_type(&self) -> BusMessageContentType { + BusMessageContentType::ProcessQuery + } + } +} + #[derive(Debug)] pub enum DependencyType { - File, + File, Service, } -#[derive(Debug)] +#[derive(Debug, Serialize, Clone, Copy)] pub enum ServiceState { Ok, - Unavailable + Unavailable, } +impl std::fmt::Display for ServiceState { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + return match self { + ServiceState::Ok => write!(f, "Ok"), + ServiceState::Unavailable => write!(f, "Unavailable"), + }; + } +} + pub struct ServiceWaitConfig(u32); impl Default for ServiceWaitConfig { @@ -25,7 +135,7 @@ impl Default for ServiceWaitConfig { } pub enum FileTriggerType { - OnChange, + OnChange, OnDelete, } @@ -34,48 +144,87 @@ impl std::fmt::Display for FileTriggerType { return match self { FileTriggerType::OnChange => write!(f, "File was changed"), FileTriggerType::OnDelete => write!(f, "File was moved or deleted"), - } + }; } } impl<'a> FileTriggerType { pub fn event(&self, file_name: Arc, trigger: Arc) -> Events { return match self { - FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger)), - FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger)), - } + FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged( + file_name, + DependencyType::File, + trigger, + )), + FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted( + file_name, + DependencyType::File, + trigger, + 
)), + }; } - pub fn event_from_file_trigger_controller(&self, file_name: Arc, trigger: &FileTriggersForController) -> Events { + pub fn event_from_file_trigger_controller( + &self, + file_name: Arc, + trigger: &FileTriggersForController, + ) -> Events { return match self { - FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_change.clone())), - FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger.on_delete.clone())), - } + FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged( + file_name, + DependencyType::File, + trigger.on_change.clone(), + )), + FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted( + file_name, + DependencyType::File, + trigger.on_delete.clone(), + )), + }; } } #[derive(Debug)] pub enum Triggers { - File { on_change: Arc, on_delete: Arc }, - Service {on_lost: Arc, wait: u32}, + File { + on_change: Arc, + on_delete: Arc, + }, + Service { + on_lost: Arc, + wait: u32, + }, } impl Triggers { pub fn new_file(on_change: Arc, on_delete: Arc) -> Triggers { - Triggers::File { on_change, on_delete } + Triggers::File { + on_change, + on_delete, + } } pub fn new_service(on_lost: Arc, wait_time: u32) -> Triggers { - Triggers::Service{on_lost, wait: wait_time} + Triggers::Service { + on_lost, + wait: wait_time, + } } pub fn to_service_negative_event(&self, service_name: Arc) -> Option { if let Triggers::Service { on_lost, .. 
} = self { - return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable(service_name, DependencyType::Service, on_lost.clone()))) + return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable( + service_name, + DependencyType::Service, + on_lost.clone(), + ))); } None } } #[derive(Debug)] -pub struct FileTriggersForController{ pub on_change: Arc, pub on_delete: Arc } +pub struct FileTriggersForController { + pub on_change: Arc, + pub on_delete: Arc, +} pub struct ServiceTriggersForController(Arc); impl std::fmt::Display for DependencyType { @@ -83,21 +232,34 @@ impl std::fmt::Display for DependencyType { return match self { DependencyType::File => write!(f, "File"), DependencyType::Service => write!(f, "Service"), - } + }; + } +} + +#[derive(Debug, serde::Serialize, Clone, Copy)] +pub enum ProcessState { + Pending, + Holding, + Stopped, + StoppedByCli, + HoldingByCli, +} +impl std::fmt::Display for ProcessState { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + return match self { + ProcessState::Pending => write!(f, "Running"), + ProcessState::Holding => write!(f, "Frozen"), + ProcessState::Stopped => write!(f, "Stopped"), + ProcessState::StoppedByCli => write!(f, "Stopped by Admin"), + ProcessState::HoldingByCli => write!(f, "Frozen by Admin"), + }; } } -#[derive(Debug)] -pub enum ProcessState { - Pending, - Holding, - Stopped, - StoppedByCli, -} #[derive(Debug)] pub enum Events { Positive(Arc), - Negative(NegativeOutcomes) + Negative(NegativeOutcomes), } #[derive(Debug)] pub enum NegativeOutcomes { @@ -123,11 +285,11 @@ pub enum ConfigActuality { /// # Struct for the 1st level in json conf file /// ## for storing main config data -/// +/// /// > (needed in serialization and deserialization) -/// +/// /// *depends on* : `TrackingProcess` -/// +/// /// ``` json /// { /// -> "dateOfCreation": "1721381809104", @@ -141,8 +303,6 @@ pub struct Processes { // runner_id: usize, #[serde(rename = "dateOfCreation")] pub date_of_creation: 
String, - #[serde(rename = "configServer")] - pub config_server: String, #[serde(default)] pub processes: Vec, } @@ -150,9 +310,8 @@ pub struct Processes { impl Default for Processes { fn default() -> Self { Self { - date_of_creation : String::new(), - config_server : String::from("default"), - processes : Vec::new(), + date_of_creation: String::new(), + processes: Vec::new(), } } } @@ -161,15 +320,18 @@ impl Processes { pub fn is_default(&self) -> bool { self.date_of_creation.is_empty() } + pub fn get_version(&self) -> &str { + &self.date_of_creation + } } /// # Struct for the 2nd level in json conf file /// ## for each process to contain info, such as name, path and dependencies -/// +/// /// > (needed in serialization and deserialization) -/// +/// /// *depends on* : `Dependencies` -/// +/// /// ``` json /// ... /// "processes": [ @@ -190,11 +352,11 @@ pub struct TrackingProcess { /// # Struct for the 3d level in json conf file /// ## for processes' dependencies including files and services -/// +/// /// > (needed in serialization and deserialization) -/// +/// /// *depends on* : `Files`, `Services` -/// +/// /// ``` json /// ... /// "path": "/home/user/monitor/runner-rs/temp-process", @@ -214,11 +376,11 @@ pub struct Dependencies { /// # Struct for the 4th level in json conf file /// ## for containing file object with its triggers to manipulate in daemons -/// +/// /// > (needed in serialization and deserialization) -/// +/// /// *depends on* : `FileTriggers` -/// +/// /// ``` json /// ... /// "files": [ @@ -226,7 +388,7 @@ pub struct Dependencies { /// -> "filename": "dep-file", /// -> "src": "/home/user/monitor/runner-rs/tests/examples/", /// -> "triggers": { ... } -/// -> } , +/// -> } , /// ... /// ], ... 
/// ``` @@ -239,11 +401,11 @@ pub struct Files { /// # Struct for the 4th level in json conf file /// ## for containing service object with its triggers to manipulate in daemons -/// +/// /// > (needed in serialization and deserialization) -/// +/// /// *depends on* : `ServiceTriggers` -/// +/// /// ``` json /// ... /// "services": [ @@ -251,7 +413,7 @@ pub struct Files { /// -> "hostname" : "ya.ru", /// -> "port" : 443, /// -> "triggers": { ... } -/// -> } , +/// -> } , /// ... /// ], ... /// ``` @@ -264,11 +426,11 @@ pub struct Services { /// # Struct for the 5th level in json conf file /// ## for instancing each service's policies such as on lost or time to wait till reachable -/// +/// /// > (needed in serialization and deserialization) -/// +/// /// *depends on* : - -/// +/// /// ``` json /// ... /// "port": 443, @@ -288,11 +450,11 @@ pub struct ServiceTriggers { /// # Struct for the 5th level in json conf file /// ## for instancing each file's policies such as on-delete or onupdate events -/// +/// /// > (needed in serialization and deserialization) -/// +/// /// *depends on* : - -/// +/// /// ``` json /// ... 
/// "src": "/home/user/monitor/runner-rs/tests/examples/", @@ -308,106 +470,107 @@ pub struct FileTriggers { pub on_delete: String, #[serde(rename = "onChange")] pub on_change: String, + #[serde(rename = "doRestore")] + pub do_restore: bool, } /// # Metrics struct /// ## for gathering all system metrics (from container + each process) -/// +/// /// > (needed in hagent communication, `?...?`) -/// +/// /// *depends on* : `ContainerMetrics`, `ProcessMetrics` -/// -#[derive(Debug, Clone, Serialize,)] +/// +#[derive(Debug, Clone, Serialize)] pub struct Metrics { - pub container_metrics : ContainerMetrics, - pub processes_metrics : Vec, + pub container_metrics: ContainerMetrics, + pub processes_metrics: Vec, // pub net_metrics : Vec, } -/// ## Metrics struct's constructor +/// ## Metrics struct's constructor impl Metrics { pub fn new(cm: ContainerMetrics, prm: Vec) -> Self { Metrics { - container_metrics : cm, - processes_metrics : prm, + container_metrics: cm, + processes_metrics: prm, // net_metrics : net, } } } - /// # Container metrics struct /// ## for gathering all container metrics -/// +/// /// > (needed in gathering metrics) -/// +/// /// *depends on* : - -/// +/// #[derive(Debug, Clone, Serialize)] pub struct ContainerMetrics { - container_id : String, - cpu_load : f32, - ram_load : f32, + container_id: String, + cpu_load: f32, + ram_load: f32, // pub net_activity : ??? 
- processes : Vec, + processes: Vec, } -/// ## Container struct's constructor +/// ## Container struct's constructor impl ContainerMetrics { - pub fn new(container_id : &str, cpu: f32, ram: f32, subsystems: Vec,) -> Self{ + pub fn new(container_id: &str, cpu: f32, ram: f32, subsystems: Vec) -> Self { ContainerMetrics { - container_id : String::from(container_id), - cpu_load : cpu, - ram_load : ram, - processes : subsystems, + container_id: String::from(container_id), + cpu_load: cpu, + ram_load: ram, + processes: subsystems, } } } /// # Process metrics struct /// ## for gathering each process's all metrics -/// +/// /// > (needed in gathering metrics) -/// +/// /// *depends on* : - -/// +/// #[derive(Debug, Clone, Serialize)] pub struct ProcessMetrics { - pub process_name : String, - cpu_load : f32, - ram_load : f32, + pub process_name: String, + cpu_load: f32, + ram_load: f32, } -/// ## Process struct's constructor +/// ## Process struct's constructor impl ProcessMetrics { - pub fn new(process_name :&str, cpu: f32, ram: f32) -> Self { + pub fn new(process_name: &str, cpu: f32, ram: f32) -> Self { ProcessMetrics { - process_name : String::from(process_name), - cpu_load : cpu, - ram_load : ram, + process_name: String::from(process_name), + cpu_load: cpu, + ram_load: ram, } } } /// # Packet info struct /// ## for gathering info about container's net activity -/// +/// /// > (needed in gathering metrics) -/// +/// /// *depends on* : - -/// +/// #[derive(Debug, Clone, Serialize)] pub struct PacketInfo { - protocol : String, - dst_ip : Ipv4Addr, - src_ip : Ipv4Addr, - size : usize, + protocol: String, + dst_ip: Ipv4Addr, + src_ip: Ipv4Addr, + size: usize, } -/// ## PacketInfo's constructor +/// ## PacketInfo's constructor impl PacketInfo { pub fn new(prt: String, dest: Ipv4Addr, src: Ipv4Addr, size_of_packet: usize) -> Self { PacketInfo { - protocol : prt, - dst_ip : dest, - src_ip : src, - size : size_of_packet, + protocol: prt, + dst_ip: dest, + src_ip: src, + size: 
size_of_packet, } } -} \ No newline at end of file +} diff --git a/noxis-rs/src/utils.rs b/noxis-rs/src/utils.rs index b1e315a..385e527 100644 --- a/noxis-rs/src/utils.rs +++ b/noxis-rs/src/utils.rs @@ -1,39 +1,44 @@ +pub mod bus; pub mod files; pub mod hagent; pub mod metrics; pub mod prcs; pub mod services; -// TODO : saving current flags state - -use crate::options::structs::{CustomError, TrackingProcess, Processes}; -// use files::create_watcher; -// use files::file_handler; -// use inotify::Inotify; -use log::{error, warn, info}; -use prcs::{ - freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process, - unfreeze_process, -}; -// use services::service_handler; +use crate::options::structs::bus::{BusMessage, BusMessageContentType, InternalCli}; +use crate::options::structs::Processes; +use async_trait::async_trait; +use files::v2::FilesController; +use lazy_static::lazy_static; +use log::{error, info}; +use prcs::v2::ProcessesController; +use services::v2::ServicesController; use std::process::Command; use std::sync::Arc; -// use tokio::join; use tokio::sync::mpsc; use tokio::time::Duration; -// use tokio::sync::mpsc::{Receiver as MpscReciever, Sender as MpscSender}; -// controllers import -use prcs::v2::ProcessesController; -use files::v2::FilesController; -use services::v2::ServicesController; -use async_trait::async_trait; -const GET_ID_CMD: &str = "hostname"; +lazy_static! 
{ + static ref GET_ID_CMD: &'static str = "hostname"; +} + +// const GET_ID_CMD: &str = "hostname"; pub mod v2 { - use std::collections::{BTreeMap, HashMap, LinkedList, VecDeque}; - use crate::options::structs::{Events, FileTriggersForController, ProcessUnit, Triggers}; use super::*; + use crate::options::preboot::PrebootParams; + use crate::utils::metrics::processes::{ProcessesAll, ProcessesQuery}; + use crate::{ + options::structs::{ + bus::CLiCommand, Events, FileTriggersForController, ProcessUnit, Triggers, + }, + utils::metrics::processes::deps::{Dependencies, FilesExtended, ServicesExtended}, + }; + use std::any::Any; + use std::collections::{BTreeMap, HashMap, LinkedList, VecDeque}; + + type BusReciever = tokio::sync::mpsc::Receiver; + type BusSender = Arc>; #[derive(Debug)] enum ControllerResult { @@ -44,19 +49,31 @@ pub mod v2 { #[derive(Debug)] struct Supervisor { - prcs : LinkedList, - files : LinkedList, - services : LinkedList, + prcs: LinkedList, + files: LinkedList, + services: LinkedList, + config: Arc, + bus: (BusReciever, BusSender), } impl Supervisor { - pub fn new() -> Supervisor { - Supervisor { prcs: LinkedList::new(), files: LinkedList::new(), services: LinkedList::new()} + pub fn new(bus_reciever: BusReciever, bus_sender: BusSender) -> Supervisor { + Supervisor { + prcs: LinkedList::new(), + files: LinkedList::new(), + services: LinkedList::new(), + config: Arc::new(Processes::default()), + bus: (bus_reciever, bus_sender), + } } - pub async fn with_config(mut self, config: &Processes) -> Supervisor { - let _ = config.processes.iter() - .for_each(|prc| { - let (rx, tx) = mpsc::channel::(10); + pub async fn with_config( + mut self, + config: Processes, + preboot : Arc + ) -> Supervisor { + self.config = Arc::from(config); + let _ = self.config.processes.iter().for_each(|prc| { + let (rx, tx) = mpsc::channel::(100); let temp = ProcessesController::new(&prc.name, tx).with_exe(&prc.path); if !self.prcs.contains(&temp) { self.prcs.push_back(temp); 
@@ -64,15 +81,29 @@ pub mod v2 { let rx = Arc::new(rx); let proc_name: Arc = Arc::from(prc.name.clone()); - let _ = prc.dependencies.files.iter() - .for_each(|file| { + let _ = prc.dependencies.files.iter().for_each(|file| { let mut hm = HashMap::new(); - let triggers = FileTriggersForController { on_change: Arc::from(file.triggers.on_change.clone()), on_delete: Arc::from(file.triggers.on_delete.clone())}; + let triggers = FileTriggersForController { + on_change: Arc::from(file.triggers.on_change.clone()), + on_delete: Arc::from(file.triggers.on_delete.clone()), + }; hm.insert(proc_name.clone(), (triggers, rx.clone())); - let tempfile = FilesController::new(&file.filename.as_str(), hm) - .with_path(&file.src); - + let backup_file = { + if file.triggers.do_restore { + use ulid::Ulid; + format!("{}{}.bak", { + let path = preboot.backup_folder.to_string_lossy(); + if path.ends_with("/") { path.to_string() } + else { format!("{}/", path) } + }, Ulid::new()) + } else { + String::new() + } + }; + + let tempfile = + FilesController::new(&file.filename.as_str(), hm).with_path(&file.src, backup_file); if let Ok(file) = tempfile { if let Some(current_file) = self.files.iter_mut().find(|a| &&file == a) { @@ -82,17 +113,15 @@ pub mod v2 { } } }); - + // servs - let _ = prc.dependencies.services.iter() - .for_each(|serv| { - let access_url = ServicesController::get_access_url(&serv.hostname, serv.port.as_ref()); + let _ = prc.dependencies.services.iter().for_each(|serv| { + let access_url = + ServicesController::get_access_url(&serv.hostname, serv.port.as_ref()); // preparations let rx = rx.clone(); - let serv_cont = ServicesController::new().with_access_name( - &serv.hostname, - &access_url - ); + let serv_cont = + ServicesController::new().with_access_name(&serv.hostname, &access_url); // triggers let arc: Arc = Arc::from(serv.triggers.on_lost.clone()); let triggers = Triggers::new_service(arc, serv.triggers.wait); @@ -104,12 +133,13 @@ pub mod v2 { let mut vec: VecDeque> = 
VecDeque::new(); vec.push_back(proc_name.clone()); // connection_queue - let mut connection_queue: BTreeMap>> = BTreeMap::new(); + let mut connection_queue: BTreeMap>> = + BTreeMap::new(); connection_queue.insert(serv.triggers.wait, vec); // event_reg let mut hm = HashMap::new(); hm.insert(proc_name.clone(), (triggers, rx)); - + let serv_cont = serv_cont.with_params(connection_queue, hm); self.services.push_back(serv_cont); } @@ -118,42 +148,211 @@ pub mod v2 { self } pub fn get_stats(&self) -> String { - format!("processes: {}, files: {}, services: {}", self.prcs.len(),self.files.len(), self.services.len()) + format!( + "processes: {}, files: {}, services: {}", + self.prcs.len(), + self.files.len(), + self.services.len() + ) + } + pub async fn extract_extended_procs( + config: Arc, + prcs_list: &LinkedList, + files_list: &LinkedList, + servs_list: &LinkedList, + ) -> Vec { + let mut procs = Vec::new(); + for prc in config.processes.iter() { + if let Some(prc_cont) = prcs_list + .iter() + .find(|&prc_cont| prc.name == *prc_cont.name) + { + let mut vec_files = Vec::new(); + let mut vec_services = Vec::new(); + prc.dependencies + .files + .iter() + .map(|file| (file, format!("{}{}", file.src, file.filename))) + .for_each(|(file, code_name)| { + if let Some(file_cont) = files_list + .iter() + .find(|&file_cont| *file_cont.get_code_name() == code_name) + { + vec_files.push(FilesExtended { + name: file.filename.to_string(), + path: file.src.to_string(), + status: file_cont.get_state(), + backup_file : file_cont.get_backup_file(), + triggers: file.triggers.to_owned(), + }); + } + }); + prc.dependencies + .services + .iter() + .map(|serv| { + ( + serv, + format!("{}{}", serv.hostname, { + if let Some(port) = serv.port { + format!(":{}", port) + } else { + String::new() + } + }), + ) + }) + .for_each(|(serv, acces_url)| { + if let Some(serv_cont) = servs_list + .iter() + .find(|&serv_cont| *serv_cont.get_arc_access_url() == acces_url) + { + 
vec_services.push(ServicesExtended { + name: serv.hostname.to_owned(), + access_name: (*serv_cont.get_arc_access_url()).to_owned(), + status: serv_cont.get_state(), + triggers: serv.triggers.to_owned(), + }); + } + }); + procs.push(ProcessesAll { + name: prc_cont.name.clone().to_string(), + state: prc_cont.get_state(), + pid: prc_cont.get_pid(), + dependencies: Dependencies { + files: vec_files, + services: vec_services, + }, + }); + } + } + procs + } + } - + #[async_trait] impl ProcessUnit for Supervisor { async fn process(&mut self) { info!("Initializing monitoring ..."); loop { - // dbg!(&self); + // + let rec = &mut self.bus.0; + while let Ok(request) = rec.try_recv() { + if let BusMessage::Request(_, _, cont) = request { + let cont: Box = cont; + match cont.downcast::() { + Ok(cli) => { + let mut count = 0; + let fut = (&mut self.prcs) + .into_iter() + .find(|prc| prc.name == Arc::from(cli.prc.as_ref())) + .map(|prc| async { + let count = &mut count; + *count += 1; + let res = match cli.cmd { + CLiCommand::Start => prc.start_by_user_call().await, + CLiCommand::Stop => prc.stop_by_user_call().await, + CLiCommand::Restart => prc.restart_by_user_call().await, + CLiCommand::Freeze => prc.freeze_by_user_call().await, + CLiCommand::Unfreeze => { + prc.unfreeze_by_user_call().await + } + }; + let sender = self.bus.1.clone(); + let resp_content = match res { + Ok(_) => Ok(format!( + "Ok on user call about process {}", + prc.name + )), + Err(er) => Err(anyhow::Error::msg(format!( + "Error: User call for process {} failed : {}", + prc.name, er + ))), + }; + let _ = sender.send(BusMessage::Response( + crate::options::structs::bus::BusMessageDirection::ToCli, + BusMessageContentType::Result, + Box::new(resp_content) + )).await; + 1 + }); + if let Some(fut) = fut { + fut.await; + } else { + let _ = self.bus.1.clone().send(BusMessage::Response( + crate::options::structs::bus::BusMessageDirection::ToCli, + BusMessageContentType::RawString, + Box::new( + 
Err(anyhow::Error::msg(format!("No process named `{}` was found in controlled scope", cli.prc))) + ) + )).await; + } + } + Err(boxed) => { + if let Ok(query) = boxed.downcast::() { + match *query { + ProcessesQuery::QueryAll => { + let procs = Self::extract_extended_procs( + self.config.clone(), + &self.prcs, + &self.files, + &self.services, + ) + .await; + let _ = self.bus.1.clone().send(BusMessage::Response( + crate::options::structs::bus::BusMessageDirection::ToMetrics, + BusMessageContentType::ProcessQuery, + Box::new( + ProcessesQuery::All(procs) + ) + )).await; + } + ProcessesQuery::QueryGeneral => { + let mut vec = Vec::new(); + for prc in &self.prcs { + vec.push(prc.get_general_info().await); + } + let _ = self.bus.1.clone().send(BusMessage::Response( + crate::options::structs::bus::BusMessageDirection::ToMetrics, + BusMessageContentType::ProcessQuery, + Box::new( + ProcessesQuery::General(vec) + ) + )).await; + } + _ => { + let _ = self.bus.1.clone().send(BusMessage::Response( + crate::options::structs::bus::BusMessageDirection::ToCli, + BusMessageContentType::RawString, + Box::new( + Err(anyhow::Error::msg("Unknown request format was sent to the Supervisor")) + ) + )).await; + } + } + } + } + } + } + } + let mut tasks: Vec> = vec![]; - // let (mut prc, mut file, mut serv) = (self.prcs.pop_front().unwrap(), self.files.pop_front().unwrap(), self.services.pop_front().unwrap()); - // let res = tokio::join!(prc.process(), file.process(), serv.process()); if let Some(mut val) = self.prcs.pop_front() { - tasks.push( - tokio::spawn( async move { - val.process().await; - ControllerResult::Process(Some(val)) - }) - ); + tasks.push(tokio::spawn(async move { + val.process().await; + ControllerResult::Process(Some(val)) + })); } if let Some(mut val) = self.files.pop_front() { - tasks.push( - tokio::spawn( async move { - val.process().await; - 
ControllerResult::File(Some(val)) + })); } if let Some(mut val) = self.services.pop_front() { - tasks.push( - tokio::spawn( async move { - val.process().await; - ControllerResult::Service(Some(val)) - }) - ); + tasks.push(tokio::spawn(async move { + val.process().await; + ControllerResult::Service(Some(val)) + })); } for task in tasks { match task.await { @@ -169,254 +368,25 @@ pub mod v2 { } } - // spawn tasks - // spawn prc - // spawn files - // spawn services - // ## for ... i.await in loop pub async fn init_monitoring( - config: Processes - ) -> anyhow::Result<()> { - let mut supervisor = Supervisor::new().with_config(&config).await; + config: Processes, + preboot : Arc, + bus_reciever: BusReciever, + bus_sender: BusSender, + ) -> anyhow::Result<()> { + let mut supervisor = Supervisor::new(bus_reciever, bus_sender) + .with_config(config, preboot) + .await; info!("Monitoring: {} ", &supervisor.get_stats()); supervisor.process().await; Ok(()) } - - - // async fn generate_controllers<'a>(config: Processes) -> (HashSet>, HashSet>, HashSet>) { - // let mut prcs: HashSet> = HashSet::new(); - // let mut files: HashSet> = HashSet::new(); - // let mut services: HashSet> = HashSet::new(); - // for prc in config.processes { - // let (rx, tx) = mpsc::channel::>(10); - // // let new_prc = ProcessesController::new(&prc.name, tx).with_exe(prc.path); - // let mut new_prc = ProcessesController::new("&prc.name", tx).with_exe(prc.path); - // let a = new_prc.process().await; - - // } - // (prcs, files, services) - // } - // spawn prc check with semaphore check - async fn prcs_monitoriing() -> anyhow::Result<()> { Ok(()) } - - // spawn file check with semaphore check - async fn files_monitoriing() -> anyhow::Result<()> { Ok(()) } - - // spawn service check with semaphore check - async fn services_monitoriing() -> anyhow::Result<()> { Ok(()) } } -/// # Fn `run_daemons` -/// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to 
given messages into channel -/// -/// *input* : `Arc`, `Arc>`, `&mut mpsc::Receiver`, -/// -/// *output* : () -/// -/// *initiator* : main thread -/// -/// *managing* : Arc to current process struct, Arc to managing channel writer, mut ref to managing channel reader -/// -/// *depends on* : all module `prcs`'s functions, fn `running_handler`, fn `utils::files::create_watcher` -/// -/// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process -/// -// pub async fn run_daemons( -// proc: Arc, -// tx: Arc>, -// rx: &mut mpsc::Receiver, -// ) { -// // creating watchers + ---buffers--- -// let mut watchers: Vec = vec![]; -// for file in proc.dependencies.files.clone().into_iter() { -// if let Ok(watcher) = create_watcher(&file.filename, &file.src).await { -// watchers.push(watcher); -// } else { -// let _ = tx.send(121).await; -// } -// // watchers.push(create_watcher(&file.filename, &file.src).await.unwrap()); -// } -// let watchers_clone: Arc>> = -// Arc::new(tokio::sync::Mutex::new(watchers)); - -// loop { -// let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone()); -// tokio::select! { -// _ = run_hand => continue, -// _val = rx.recv() => { -// if process_protocol_symbol(proc.clone(), _val.unwrap()).await.is_err() { -// return; -// } -// }, -// } -// tokio::task::yield_now().await; -// } -// } - -async fn process_protocol_symbol(proc: Arc, val: u8) -> Result<(), CustomError>{ - match val { - // 1 - File-dependency handling error -> terminating (after waiting) - 1 => { - if is_active(&proc.name).await { - error!("File-dependency handling error: Terminating {} process ..." , &proc.name); - terminate_process(&proc.name).await; - tokio::time::sleep(Duration::from_millis(500)).await; - } - // return; - }, - // 2 - File-dependency handling error -> holding (after waiting) - 2 => { - if !is_frozen(&proc.name).await { - error!("File-dependency handling error: Freezing {} process ..." 
, &proc.name); - freeze_process(&proc.name).await; - tokio::time::sleep(Duration::from_millis(100)).await; - } - }, - // 3 - Running process error - 3 => { - error!("Error due to starting {} process", &proc.name); - return Err(CustomError::Fatal) - }, - // 4 - Timeout of waiting service-dependency -> staying (after waiting) - 4 => { - // warn!("Timeout of waiting service-dependency: Ignoring on {} process ..." , &proc.name); - tokio::time::sleep(Duration::from_millis(100)).await; - }, - // 5 - Timeout of waiting service-dependency -> terminating (after waiting) - 5 => { - if is_active(&proc.name).await { - error!("Timeout of waiting service-dependency: Terminating {} process ..." , &proc.name); - terminate_process(&proc.name).await; - tokio::time::sleep(Duration::from_millis(500)).await; - } - }, - // 6 - Timeout of waiting service-dependency -> holding (after waiting) - 6 => { - // println!("holding {}-{}", proc.name, is_active(&proc.name).await); - if !is_frozen(&proc.name).await { - error!("Timeout of waiting service-dependency: Freezing {} process ..." , &proc.name); - freeze_process(&proc.name).await; - tokio::time::sleep(Duration::from_secs(1)).await; - } - }, - // // 7 - File-dependency change -> terminating (after check) - 7 => { - error!("File-dependency warning (file changed). Terminating {} process...", &proc.name); - terminate_process(&proc.name).await; - tokio::time::sleep(Duration::from_millis(100)).await; - return Err(CustomError::Fatal) - }, - // // 8 - File-dependency change -> restarting (after check) - 8 => { - warn!("File-dependency warning (file changed). Restarting {} process...", &proc.name); - let _ = restart_process(&proc.name, &proc.path).await; - tokio::time::sleep(Duration::from_millis(100)).await; - }, - // // 9 - File-dependency change -> staying (after check) - 9 => { - warn!("File-dependency warning (file changed). 
Ignoring event on {} process...", &proc.name); - tokio::time::sleep(Duration::from_millis(100)).await; - }, - - // 10 - Process unfreaze call via file handler (or service handler) - 10 | 11 => { - if is_frozen(&proc.name).await { - warn!("Unfreezing process {} call...", &proc.name); - unfreeze_process(&proc.name).await; - } - tokio::time::sleep(Duration::from_millis(100)).await; - }, - // 11 - Process unfreaze call via service handler - // 11 => { - // if is_frozen(&proc.name).await { - // warn!("Unfreezing process {} call...", &proc.name); - // unfreeze_process(&proc.name).await; - // } - // tokio::time::sleep(Duration::from_millis(100)).await; - // }, - // 101 - Impermissible trigger values in JSON - 101 => { - error!("Impermissible trigger values in JSON in {}'s block. Killing thread...", &proc.name); - if is_active(&proc.name).await { - terminate_process(&proc.name).await; - } - return Err(CustomError::Fatal) - }, - // - // 121 - Cannot create valid watcher for file dependency - // todo : think about valid situation - 121 => { - error!("Cannot create valid watcher for file dependency. 
Terminating {} process...", &proc.name); - let _ = terminate_process(&proc.name).await; - return Err(CustomError::Fatal) - }, - // 111 - global thread termination with killing current child in a face - // of a current process - 111 => { - warn!("Terminating {}'s child processes...", &proc.name); - match is_active(&proc.name).await { - true => { - terminate_process(&proc.name).await; - }, - false => { - log::info!("Process {} is already terminated!", proc.name); - }, - } - }, - _ => {}, - } - Ok(()) -} -// check process status daemon -/// # Fn `run_daemons` -/// ## func to async exec subjobs of checking process, services and files states -/// -/// *input* : `Arc`, `Arc>`, `Arc>>` -/// -/// *output* : () -/// -/// *initiator* : fn `run_daemons` -/// -/// *managing* : Arc to current process struct, Arc to Mutex to list of file watchers -/// -/// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}` -/// -// pub async fn running_handler( -// prc: Arc, -// tx: Arc>, -// watchers: Arc>>, -// ) { -// // services and files check (once) -// let files_check = file_handler( -// &prc.name, -// &prc.dependencies.files, -// tx.clone(), -// watchers.clone(), -// ); -// let services_check = service_handler(&prc.name, &prc.dependencies.services, tx.clone()); - -// let res = join!(files_check, services_check); -// // if inactive -> spawn checks -> active is true -// if !is_active(&prc.name).await && res.0.is_ok() && res.1.is_ok() { -// if start_process(&prc.name, &prc.path).await.is_err() { -// tx.send(3).await.unwrap(); -// return; -// } -// } -// // if frozen -> spawn checks -> unfreeze is true -// else if is_frozen(&prc.name).await && res.0.is_ok() && res.1.is_ok() { -// tx.send(10).await.unwrap(); -// return; -// } -// // tokio::time::sleep(Duration::from_millis(100)).await; -// tokio::task::yield_now().await; -// } - // todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" 
| head -1 | awk -F '/' '{print $5}' /// # Fn `get_container_id` /// ## for getting container id used in logs -/// +/// /// *input* : - /// /// *output* : Some(String) if cont-id was grubbed | None - if not @@ -426,9 +396,9 @@ async fn process_protocol_symbol(proc: Arc, val: u8) -> Result< /// *managing* : - /// /// *depends on* : - -/// +/// pub fn get_container_id() -> Option { - match Command::new(GET_ID_CMD).output() { + match Command::new(*GET_ID_CMD).output() { Ok(output) => { if !output.status.success() { return None; @@ -437,7 +407,7 @@ pub fn get_container_id() -> Option { if id.is_empty() { return None; } - Some(String::from_utf8_lossy(&output.stdout).to_string()) + Some(String::from_utf8_lossy(&output.stdout).trim().to_string()) } Err(_) => None, } diff --git a/noxis-rs/src/utils/bus.rs b/noxis-rs/src/utils/bus.rs new file mode 100644 index 0000000..f4b59a9 --- /dev/null +++ b/noxis-rs/src/utils/bus.rs @@ -0,0 +1,97 @@ +use std::sync::Arc; + +use crate::options::structs::bus::{BusMessage, BusMessageDirection}; +use crate::options::structs::ProcessUnit; +use log::{error, trace}; +use tokio::sync::mpsc::{Receiver, Sender}; + +type Inner = Receiver; +type Outter = Arc>; + +#[derive(Debug)] +pub struct Highway { + to_cli: Outter, + to_supervisor: Outter, + to_metrics: Outter, +} +impl Highway { + fn new(to_cli: Outter, to_supervisor: Outter, to_metrics: Outter) -> Self { + Self { + to_cli, + to_supervisor, + to_metrics, + } + } + async fn send(&self, msg: BusMessage) -> anyhow::Result<()> { + let dir = match &msg { + BusMessage::Request(dir, ..) | BusMessage::Response(dir, ..) 
=> { + trace!("redirecting message to {:?} ...", dir); + dir + } + }; + match dir { + BusMessageDirection::ToCli => self.send_cli(msg).await, + BusMessageDirection::ToSupervisor => self.send_supervisor(msg).await, + BusMessageDirection::ToMetrics => self.send_metrics(msg).await, + } + } + async fn send_cli(&self, msg: BusMessage) -> anyhow::Result<()> { + self.to_cli.send(msg).await?; + Ok(()) + } + async fn send_supervisor(&self, msg: BusMessage) -> anyhow::Result<()> { + self.to_supervisor.send(msg).await?; + Ok(()) + } + async fn send_metrics(&self, msg: BusMessage) -> anyhow::Result<()> { + self.to_metrics.send(msg).await?; + Ok(()) + } +} + +pub struct Bus { + inner: Inner, + highway: Highway, +} + +impl Bus { + pub fn new(inner: Inner, to_cli: Outter, to_supervisor: Outter, to_metrics: Outter) -> Self { + Self { + inner, + highway: Highway::new(to_cli, to_supervisor, to_metrics), + } + } +} + +#[async_trait::async_trait] +impl ProcessUnit for Bus { + async fn process(&mut self) { + loop { + while let Ok(content) = self.inner.try_recv() { + // debug!("new message to the Bus : {:?}", &content); + let msg = match content { + BusMessage::Request(direction, content_type, content) => { + trace!( + "bus has got a new Request with direction {:?} and type {:?}", + &direction, + &content_type + ); + BusMessage::Request(direction, content_type, content) + } + BusMessage::Response(direction, content_type, content) => { + trace!( + "bus has got a new Response with direction {:?} and type {:?}", + &direction, + &content_type + ); + BusMessage::Response(direction, content_type, content) + } + }; + if let Err(er) = self.highway.send(msg).await { + error!("Cannot redirect message : {}", er); + } + } + tokio::time::sleep(std::time::Duration::from_millis(20)).await; + } + } +} diff --git a/noxis-rs/src/utils/files.rs b/noxis-rs/src/utils/files.rs index da87b0f..dcbf93e 100644 --- a/noxis-rs/src/utils/files.rs +++ b/noxis-rs/src/utils/files.rs @@ -1,321 +1,293 @@ - use 
crate::options::structs::{CustomError, Files}; - use super::prcs::{is_active, is_frozen}; - use inotify::{EventMask, Inotify, WatchMask}; - use std::borrow::BorrowMut; - use std::path::Path; - use std::sync::Arc; - use tokio::sync::mpsc; - use tokio::sync::mpsc::Sender as Sender; - use tokio::time::Duration; - use crate::options::structs::Events; - use async_trait::async_trait; +use crate::options::structs::CustomError; +use crate::options::structs::Events; +use async_trait::async_trait; +use inotify::{EventMask, Inotify, WatchMask}; +use std::path::Path; +use std::sync::Arc; +use tokio::sync::mpsc::Sender; - pub mod v2 { - use log::{error, info, warn}; - use crate::options::structs::{FileTriggerType, FileTriggersForController as Triggers, ProcessUnit}; - use super::*; - use std::{collections::HashMap, path::Path}; +pub mod v2 { + use super::*; + use crate::options::structs::{ + FileTriggerType, FileTriggersForController as Triggers, ProcessUnit, + }; + use log::{error, info, warn}; + use serde::Serialize; + use std::{collections::HashMap, path::Path}; - type MpscSender = Arc>; - type EventHandlers = HashMap, (Triggers, MpscSender)>; + type MpscSender = Arc>; + type EventHandlers = HashMap, (Triggers, MpscSender)>; - #[derive(Debug)] - enum FileState { - Ok, - NotFound, + #[derive(Debug, Serialize, Clone, Copy)] + pub enum FileState { + Ok, + NotFound, + } + + #[derive(Debug)] + pub struct FilesController { + name: Arc, + path: String, + code_name: Arc, + backup_file : String, + state: FileState, + watcher: Option, + triggers: EventHandlers, + } + + impl PartialEq for FilesController { + fn eq(&self, other: &Self) -> bool { + self.code_name == other.code_name } + } - #[derive(Debug)] - pub struct FilesController { - name : Arc, - path : String, - code_name : Arc, - state : FileState, - watcher : Option, - triggers : EventHandlers, - } - - impl PartialEq for FilesController { - fn eq(&self, other: &Self) -> bool { - self.code_name == other.code_name + impl 
FilesController { + #[inline(always)] + pub fn new(name: &str, triggers: EventHandlers) -> FilesController { + let name: Arc = Arc::from(name); + Self { + name: name.clone(), + path: String::new(), + state: FileState::Ok, + watcher: None, + triggers, + code_name: name.clone(), + backup_file: String::new(), } } - - impl FilesController { - pub fn new(name: &str, triggers: EventHandlers) -> FilesController { - let name: Arc = Arc::from(name); - Self { - name : name.clone(), - path : String::new(), - state : FileState::Ok, - watcher : None, - triggers, - code_name : name.clone(), + #[inline(always)] + pub fn with_path(mut self, path: impl AsRef, backup : String) -> anyhow::Result { + self.path = path.as_ref().to_string_lossy().into_owned(); + self.watcher = { + match create_watcher(&self.name, &self.path) { + Ok(val) => Some(val), + Err(er) => { + error!( + "Cannot create watcher for {} ({}) due to {}", + self.name, &self.path, er + ); + return Err(er); + } } + }; + self.code_name = Arc::from(format!("{}{}", &self.path, &self.code_name)); + self.backup_file = backup; + match create_backup(&self.code_name, &self.backup_file) { + Ok(_) => info!("Backup file for {} was created ({})", &self.code_name, &self.backup_file), + Err(er) => warn!("{}. 
Ignoring ...", er), } - pub fn with_path(mut self, path: impl AsRef) -> anyhow::Result { - self.path = path.as_ref().to_string_lossy().into_owned(); - self.watcher = { - match create_watcher(&self.name, &self.path) { - Ok(val) => Some(val), - Err(er) => { - error!("Cannot create watcher for {} ({}) due to {}", self.name, &self.path, er); - return Err(er) - } + Ok(self) + } + pub fn add_event(&mut self, file_controller: FilesController) { + for (k, v) in file_controller.triggers { + self.triggers.entry(k).or_insert(v); + } + } + async fn trigger_on(&mut self, trigger_type: Option) { + for (prc_name, (triggers, channel)) in &self.triggers { + let msg = match &trigger_type { + None => Events::Positive(self.code_name.clone()), + Some(event) => { + info!( + "Event on file {} ({}) : {}. Notifying `{}` ...", + &self.name, &self.path, event, &prc_name + ); + event.event_from_file_trigger_controller(self.code_name.clone(), &triggers) } }; - self.code_name = Arc::from(format!("{}{}", &self.path, &self.code_name)); - Ok(self) - } - pub fn add_event(&mut self, file_controller : FilesController) { - for (k, v) in file_controller.triggers { - self.triggers.entry(k).or_insert(v); - } - } - async fn trigger_on(&mut self, trigger_type: Option) { - for (prc_name, (triggers, channel)) in &self.triggers { - let msg = match &trigger_type { - None => { - Events::Positive(self.code_name.clone()) - }, - Some(event) => { - info!("Event on file {} ({}) : {}. Notifying `{}` ...", &self.name, &self.path, event, &prc_name); - event.event_from_file_trigger_controller(self.code_name.clone(), &triggers) - }, - }; - let _ = channel.send(msg).await; - } + let _ = channel.send(msg).await; } } - #[async_trait] - impl ProcessUnit for FilesController { - async fn process(&mut self) { - // polling file check - // 1) existing check - // dbg!(&self); - if let Ok(_) = check_file(&self.name, &self.path).await { - if let FileState::NotFound = self.state { - info!("File {} ({}) was found in determined scope. 
Notifying ...", self.name, self.code_name); - self.state = FileState::Ok; - // reseting negative outcome in prc - self.trigger_on(None).await; - } - match &mut self.watcher { - Some(notify) => { - let mut buffer = [0; 1024]; - if let Ok(mut notif_events) = notify.read_events(&mut buffer) { - // notif_events.into_iter().for_each(|mask| {dbg!(&mask.mask);}); - // todo!(); - if let (recreate_watcher, true) = ( - notif_events.any(|mask| mask.mask == EventMask::DELETE_SELF), - notif_events.any(|mask| mask.mask == EventMask::MODIFY) - ) { + pub fn get_state(&self) -> FileState { + self.state + } + pub fn get_code_name(&self) -> Arc { + self.code_name.clone() + } + pub fn get_backup_file(&self) -> String { + self.backup_file.to_string() + } + } + #[async_trait] + impl ProcessUnit for FilesController { + async fn process(&mut self) { + if let Ok(_) = check_file(&self.name, &self.path).await { + if let FileState::NotFound = self.state { + info!( + "File {} ({}) was found in determined scope. Notifying ...", + self.name, self.code_name + ); + self.state = FileState::Ok; + self.trigger_on(None).await; + } + match &mut self.watcher { + Some(notify) => { + let mut buffer = [0; 128]; + if let Ok(notif_events) = notify.read_events(&mut buffer) { + let (need_to_recreate, was_modifired) = + notif_events.fold((false, false), |(a, b), mask| { + ( + a || mask.mask == EventMask::DELETE_SELF, + b || mask.mask == EventMask::MODIFY, + ) + } + ); + if self.backup_file.is_empty() { + + } else { + + } + if let (mut recreate_watcher, true) = (need_to_recreate, was_modifired) { + if self.backup_file.is_empty() { warn!("File {} ({}) was changed", self.name, &self.path); - if recreate_watcher { - self.watcher = match create_watcher(&self.name, &self.path) { - Ok(notifier) => Some(notifier), - Err(er) => { - error!("Failed to recreate watcher for {} ({}) due to {}", - self.name, - &self.path, - er - ); - None - }, + self.trigger_on(Some(FileTriggerType::OnChange)).await; + } else { + 
recreate_watcher = true; + match restore_file(&self.code_name, &self.backup_file).await { + Ok(_) => info!("File {} was successfully restored", &self.code_name), + Err(er) => error!("Cannot restore file {} : {}", &self.code_name, er), + } + } + if recreate_watcher { + self.watcher = match create_watcher(&self.name, &self.path) { + Ok(notifier) => Some(notifier), + Err(er) => { + error!( + "Failed to recreate watcher for {} ({}) due to {}", + self.name, &self.path, er + ); + None } } - self.trigger_on(Some(FileTriggerType::OnChange)).await; - return; } } - }, - None => { /* DEAD END */}, + } } - } else { - if let FileState::Ok = self.state { - warn!("File {} ({}) was not found in determined scope", self.name, &self.path); + None => return, + } + } else { + if let FileState::Ok = self.state { + if self.backup_file.is_empty() { + warn!( + "File {} ({}) was not found in determined scope", + self.name, &self.path + ); self.state = FileState::NotFound; self.trigger_on(Some(FileTriggerType::OnDelete)).await; - } - return; - } - self.trigger_on(None).await; - // 2) change check - } - } - } - - /// # Fn `create_watcher` - /// ## for creating watcher on file's delete | update events - /// - /// *input* : `&str`, `&str` - /// - /// *output* : `Err` if it cant create file watcher | `Ok(watcher)` on successfull construction - /// - /// *initiator* : fn `file_handler`, fn `utils::run_daemons` - /// - /// *managing* : current file's name: &str, path in local storage to current file: &str - /// - /// *depends on* : - - /// - pub fn create_watcher(filename: &str, path: &str) -> anyhow::Result { - let src = format!("{}{}", path, filename); - let inotify: Inotify = Inotify::init()?; - inotify.watches().add(&src, WatchMask::ALL_EVENTS)?; - Ok(inotify) - } - - /// # Fn `create_watcher` - /// ## for managing processes by checking dep files' states - /// - /// *input* : `&str`, `&[Files]`, `Arc>`, `Arc>>` - /// - /// *output* : `Err` if something with dep file is wrong | `Ok(())` on 
successfull dep file check - /// - /// *initiator* : fn `utils::running_handler` - /// - /// *managing* : current process's name: &str, list of dep files : `&[Files]`, atomic ref counter on sender main channel for current process `Arc>`, mut list of file watchers`Arc>>` - /// - /// *depends on* : Files - /// - pub async fn file_handler( - name: &str, - files: &[Files], - tx: Arc>, - watchers: Arc>>, - ) -> anyhow::Result<()> { - for (i, file) in files.iter().enumerate() { - // let src = format!("{}{}", file.src, file.filename); - if check_file(&file.filename, &file.src).await.is_err() { - if !is_active(name).await || is_frozen(name).await { - return Err(anyhow::Error::msg("Process is frozen or stopped")); - } - match file.triggers.on_delete.as_str() { - "stay" => { - tx.send(9).await.unwrap(); - continue; - } - "stop" => { - if is_active(name).await { - tx.send(1).await.unwrap(); - } - return Err(anyhow::Error::msg("Process was stopped")); - } - "hold" => { - if is_active(name).await { - tx.send(2).await.unwrap(); - return Err(anyhow::Error::msg("Process was frozen")); - } - } - _ => { - tokio::time::sleep(Duration::from_millis(50)).await; - tx.send(101).await.unwrap(); - return Err(anyhow::Error::msg("Impermissible character or word in file trigger")); - } - } - } else if is_active(name).await && !is_frozen(name).await { - let watchers = watchers.clone(); - // println!("mutex: {:?}", watchers); - let mut buffer = [0; 128]; - let mut mutex_guard = watchers.lock().await; - if let Some(notify) = mutex_guard.get_mut(i) { - let events = notify.read_events(&mut buffer); - // println!("{:?}", events); - if events.is_ok() { - let events: Vec = events - .unwrap() - .map(|mask| mask.mask) - .filter(|mask| { - *mask == EventMask::MODIFY || *mask == EventMask::DELETE_SELF - }) - .collect(); - for event in events { - if let EventMask::DELETE_SELF = event { - // ! warning (DELETE_SELF event) ! - // println!("! 
warning (DELETE_SELF event) !"); - // * watcher recreation after dealing with file recreation mechanism in text editors - let mutex = notify.borrow_mut(); - - // *mutex = create_watcher(&file.filename, &file.src).await.unwrap(); - if let Ok(watcher) = create_watcher(&file.filename, &file.src) { - *mutex = watcher; + } else { + warn!( + "File {} ({}) was not found in determined scope. Restoring from backup-file ...", + self.name, &self.path + ); + match restore_file(&self.code_name, &self.backup_file).await { + Err(er) => error!("Cannot restore file {} : {}", &self.code_name, er), + Ok(_) => { + info!("File {} was successfully restored", &self.code_name); + self.watcher = match create_watcher(&self.name, &self.path) { + Ok(notifier) => Some(notifier), + Err(er) => { + error!( + "Failed to recreate watcher for {} ({}) : {}", + self.name, &self.path, er + ); + None + } } - } - match file.triggers.on_change.as_str() { - "stop" => { - let _ = tx.send(7).await; - } - "restart" => { - let _ = tx.send(8).await; - } - "stay" => { - let _ = tx.send(9).await; - } - _ => { - let _ = tx.send(101).await; - } - } + }, } } } + return; } + self.trigger_on(None).await; } - tokio::task::yield_now().await; - Ok(()) } +} - /// # Fn `check_file` - /// ## for checking existance of current file - /// - /// *input* : `&str`, `&str` - /// - /// *output* : `Ok(())` if file exists | `Err(_)` if not | panic on fs error - /// - /// *initiator* : fn `file_handler` - /// - /// *managing* : current file's name: `&str` and current file's path in local storage: `&str` - /// - /// *depends on* : network activity - /// - pub async fn check_file(filename: &str, path: &str) -> Result<(), CustomError> { - let arc_name = Arc::new(filename.to_string()); - let arc_path = Arc::new(path.to_string()); - tokio::task::spawn_blocking(move || { - let file_concat = format!("{}{}", arc_path, arc_name); - let path = Path::new(&file_concat); - if path.exists() { - Ok(()) - } else { - Err(CustomError::Fatal) - } - }) - 
.await - .unwrap_or_else(|_| { - panic!("Corrupted while file check process"); - }) +pub fn create_backup(target: &str, backup: &str) -> anyhow::Result { + return if !backup.is_empty() { + Ok(std::fs::copy(target, backup)?) + } else { + Err(anyhow::Error::msg(format!("No need to create backup-file for {}", target))) } +} - #[cfg(test)] - mod files_unittests { - use super::*; - #[tokio::test] - async fn try_to_create_watcher() { - let res = create_watcher("dep-file", "./tests/examples/"); - assert!(res.is_ok()); - } - #[tokio::test] - async fn try_to_create_invalid_watcher() { - let res = create_watcher("invalid-file", "/path/to/the/no/dir"); - assert!(res.is_err()); - } - #[tokio::test] - async fn check_existing_file() { - let res = check_file("dep-file", "./tests/examples/").await; - assert!(res.is_ok()); - } - #[tokio::test] - async fn check_non_existing_file() { - let res = check_file("invalid-file", "/path/to/the/no/dir").await; - assert!(res.is_err()); +pub async fn restore_file(target: &str, backup: &str) -> anyhow::Result { + Ok(tokio::fs::copy(backup, target).await?) 
+} + +/// # Fn `create_watcher` +/// ## for creating watcher on file's delete | update events +/// +/// *input* : `&str`, `&str` +/// +/// *output* : `Err` if it cant create file watcher | `Ok(watcher)` on successfull construction +/// +/// *initiator* : fn `file_handler`, fn `utils::run_daemons` +/// +/// *managing* : current file's name: &str, path in local storage to current file: &str +/// +/// *depends on* : - +/// +pub fn create_watcher(filename: &str, path: &str) -> anyhow::Result { + let src = format!("{}{}", path, filename); + let inotify: Inotify = Inotify::init()?; + inotify.watches().add(&src, WatchMask::ALL_EVENTS)?; + Ok(inotify) +} + +/// # Fn `check_file` +/// ## for checking existance of current file +/// +/// *input* : `&str`, `&str` +/// +/// *output* : `Ok(())` if file exists | `Err(_)` if not | panic on fs error +/// +/// *initiator* : fn `file_handler` +/// +/// *managing* : current file's name: `&str` and current file's path in local storage: `&str` +/// +/// *depends on* : network activity +/// +pub async fn check_file(filename: &str, path: &str) -> Result<(), CustomError> { + let arc_name = Arc::new(filename.to_string()); + let arc_path = Arc::new(path.to_string()); + tokio::task::spawn_blocking(move || { + let file_concat = format!("{}{}", arc_path, arc_name); + let path = Path::new(&file_concat); + if path.exists() { + Ok(()) + } else { + Err(CustomError::Fatal) } + }) + .await + .unwrap_or_else(|_| { + panic!("Corrupted while file check process"); + }) +} + +#[cfg(test)] +mod files_unittests { + use super::*; + #[tokio::test] + async fn try_to_create_watcher() { + let res = create_watcher("dep-file", "./tests/examples/"); + assert!(res.is_ok()); } + #[tokio::test] + async fn try_to_create_invalid_watcher() { + let res = create_watcher("invalid-file", "/path/to/the/no/dir"); + assert!(res.is_err()); + } + #[tokio::test] + async fn check_existing_file() { + let res = check_file("dep-file", "./tests/examples/").await; + 
assert!(res.is_ok()); + } + #[tokio::test] + async fn check_non_existing_file() { + let res = check_file("invalid-file", "/path/to/the/no/dir").await; + assert!(res.is_err()); + } +} diff --git a/noxis-rs/src/utils/hagent.rs b/noxis-rs/src/utils/hagent.rs index 79e7dfc..5ba8fc3 100644 --- a/noxis-rs/src/utils/hagent.rs +++ b/noxis-rs/src/utils/hagent.rs @@ -1,15 +1,15 @@ -// +// // module needed to check host-agent health condition and to communicate with it -// +// +use anyhow::{Error, Ok, Result}; use tokio::{io::Interest, net::UnixStream}; -use anyhow::{Ok, Result, Error}; -// to kill lint bug +// to kill lint bug #[allow(unused_imports)] use tokio::net::UnixListener; /// # Fn `open_unix_socket` -/// ## opening unix-socket for host-agent communication -/// +/// ## opening unix-socket for host-agent communication +/// /// *input* : - /// /// *output* : `Ok(socket)` if socket was successfully opened | `Err(er)` if not @@ -19,7 +19,7 @@ use tokio::net::UnixListener; /// *managing* : - /// /// *depends on* : - -/// +/// #[allow(dead_code)] async fn open_unix_socket(sock_path: &str) -> Result { // "/var/run/enode/hostagent.sock" @@ -27,18 +27,18 @@ async fn open_unix_socket(sock_path: &str) -> Result } /// # Fn `ha_healthcheck` -/// ## for checking host-agent state -/// +/// ## for checking host-agent state +/// /// *input* : `&UnixStream` /// /// *output* : `Ok(()))` if host-agent is running | `Err(er)` if not /// /// *initiator* : main thread `(??)` /// -/// *managing* : ref on unix-socket object +/// *managing* : ref on unix-socket object /// /// *depends on* : - -/// +/// #[allow(dead_code)] async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> { socket.ready(Interest::WRITABLE).await?; @@ -48,8 +48,8 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> { } /// # Fn `ha_healthcheck` -/// ## for sending data to host-agent using unix-socket -/// +/// ## for sending data to host-agent using unix-socket +/// /// *input* : `&UnixStream`, 
`&str` /// /// *output* : `Ok(()))` if data was sent| `Err(er)` if not @@ -59,9 +59,9 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> { /// *managing* : socket: `&UnixStream`, data: `&str` /// /// *depends on* : - -/// +/// #[allow(dead_code)] -async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error > { +async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error> { socket.ready(Interest::WRITABLE).await?; socket.writable().await?; socket.try_write(data.as_bytes())?; @@ -91,8 +91,8 @@ mod hagent_unittets { // --Result // one-shot func async fn hagent_communication_test() { - use crate::options::structs::{ProcessMetrics, ContainerMetrics, Metrics}; - + use crate::options::structs::{ContainerMetrics, Metrics, ProcessMetrics}; + let procm = ProcessMetrics::new("test-prc", 15.0, 5.0); let contm = ContainerMetrics::new("test", 32.0, 12.0, vec![procm.process_name.clone()]); let metrics = Metrics::new(contm, vec![procm]); @@ -105,10 +105,11 @@ mod hagent_unittets { let sock = sock.unwrap(); assert!(ha_healthcheck(&sock).await.is_ok()); assert!(ha_send_data(&sock, &metrics).await.is_ok()); - } #[tokio::test] async fn open_unixsocket_test() { - assert!(open_unix_socket("non/valid/socket/file.sock").await.is_err()); + assert!(open_unix_socket("non/valid/socket/file.sock") + .await + .is_err()); } -} \ No newline at end of file +} diff --git a/noxis-rs/src/utils/metrics.rs b/noxis-rs/src/utils/metrics.rs index 756e44b..5463986 100644 --- a/noxis-rs/src/utils/metrics.rs +++ b/noxis-rs/src/utils/metrics.rs @@ -1,230 +1,571 @@ -// submodule needed to get metrics such as -// cpu load, ram/rom load and net activity +///! Submodule needed to get metrics such as +///! 
cpu load, ram/rom load and net activity -// use std::sync::Mutex; -use std::sync::Arc; -use crate::options::structs::TrackingProcess; -use sysinfo::{Process, System}; -use tokio::join; -use crate::options::structs::{ProcessMetrics, ContainerMetrics}; -use super::get_container_id; -// use pcap::{Device, Capture, Active}; -// use std::net::Ipv4Addr; -// use anyhow::{Result, Ok}; +use crate::{ + options::structs::ProcessState, + utils::metrics::processes::{ProcessesGeneral, ProcessesQuery}, +}; +use log::warn; +use noxis_cli::metrics_models::MetricsMode; +use std::{any::Any, collections::BTreeMap, sync::Arc}; +// use chrono::Duration; +use super::prcs::v2::Pid; +use crate::options::structs::bus::{BusMessage, BusMessageContentType, BusMessageDirection}; +use serde::Serialize; +use std::fmt::Debug; +use sysinfo::{Disks as DisksList, Networks, System}; +// use noxis_cli::metrics_models::MetricsMode; -// type PacketBuffer = Arc>>; +pub type MetricProcesses = Vec; +type CoreUsage = BTreeMap; +type Disks = Vec; +type Ifaces = Vec; +type BusReciever = tokio::sync::mpsc::Receiver; +type BusSender = Arc>; /// # Fn `init_metrics_grubber` /// ## for initializing process of unstoppable grubbing metrics. -/// +/// /// *input* : `Arc>` ?? /// /// *output* : `Err` if it cant create grubbers | `Ok` on finish /// -/// *initiator* : main thread ?? +/// *initiator* : main thread ?? 
/// -/// *managing* : object of unix-socket reader +/// *managing* : object of unix-socket reader /// /// *depends on* : - -/// -#[allow(dead_code)] -pub async fn init_metrics_grubber() { +/// +pub async fn init_metrics_grubber( + /* BROADCSAT LISTENER TO GET `PROCESSES` OBJ */ + bus_sender: BusSender, + bus_reciever: BusReciever, +) -> anyhow::Result<()> { let mut system = System::new(); - // let mut buffer: Vec = vec![]; - // let shared_buf: PacketBuffer = Arc::new(Mutex::new(buffer)); + let mut disks = DisksList::new_with_refreshed_list(); + let mut networks = Networks::new_with_refreshed_list(); + // get_all_metrics(&mut system).await; + /* TODO */ + let mut bus_reciever = bus_reciever; + loop { + let msg = bus_reciever.try_recv(); + if let Ok(BusMessage::Request(_, _, cont)) = msg { + system.refresh_all(); + disks.refresh_list(); + networks.refresh_list(); + let cont: Box = cont; + match cont.downcast::() { + Err(_) => { + warn!("Unrecognized Metric mode was given"); + let _ = bus_sender + .send(BusMessage::Response( + BusMessageDirection::ToCli, + BusMessageContentType::Result, + Box::new(Err(anyhow::Error::msg(format!( + "Unrecognized Metric mode was given" + )))), + )) + .await; + } + Ok(mode) => { + tokio::time::sleep(tokio::time::Duration::from_millis(200)).await; + let metric: Box = match *mode { + MetricsMode::Full => { + let mut refs = + get_all_metrics(&mut system, bus_sender.clone(), &disks, &networks) + .await; + if let Some(prcs) = bus_reciever.recv().await { + if let BusMessage::Response(_, _, cont) = prcs { + let cont: Box = cont; + if let Ok(cont) = cont.downcast::() { + if let ProcessesQuery::General(info) = *cont { + refs.processes = info; + } + } + } + } + Box::new(refs) + } + MetricsMode::Host => { + Box::new(get_global_host_info(&mut system, &disks, &networks).await) + } + MetricsMode::Cpu => Box::new(get_cpu_metrics(&mut system).await), + MetricsMode::Ram => Box::new(get_ram_metrics(&mut system).await), + MetricsMode::Rom => 
Box::new(get_all_disks_metrics(&disks).await), + MetricsMode::Network => Box::new(get_all_ifaces_metrics(&networks).await), + // inspect processes + MetricsMode::Processes => { + todo!(); + } + }; + // let metric: Box = Box::new(metric); + let metric = metric.serialze_into_output(); - system.refresh_all(); - // let temp = String::from_utf8(get_pid("systemd").await.unwrap().stdout).unwrap(); - // let prc = system.process(Pid::from_str(&temp).unwrap()).unwrap(); - // prc. - // let _ = capture_packets(shared_buf.clone()).await; + let _ = bus_sender + .send(BusMessage::Response( + BusMessageDirection::ToCli, + BusMessageContentType::MetricsObj, + Box::new(metric), + )) + .await; + } + } + } else if let Ok(BusMessage::Response(_, _, cont)) = msg { + let cont: Box = cont; + if let Ok(info) = cont.downcast::() { + if let ProcessesQuery::All(info) = *info { + let procs: Vec<_> = info + .into_iter() + .map(|prc| ProcessExtended::from_process_query_all(&mut system, prc)) + .collect(); + let _ = bus_sender + .send(BusMessage::Response( + BusMessageDirection::ToCli, + BusMessageContentType::Result, + Box::>::new(Ok(serde_json::to_string_pretty( + &procs, + )?)), + )) + .await; + } else { + let _ = bus_sender + .send(BusMessage::Response( + BusMessageDirection::ToCli, + BusMessageContentType::Result, + Box::new(Err(anyhow::Error::msg(format!( + "Unknown type was send by the Supervisor" + )))), + )) + .await; + } + } else { + let _ = bus_sender + .send(BusMessage::Response( + BusMessageDirection::ToCli, + BusMessageContentType::Result, + Box::new(Err(anyhow::Error::msg(format!( + "Unknown type was send by the Supervisor" + )))), + )) + .await; + } + } + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } } -#[allow(dead_code)] -#[allow(unused_variables)] -async fn gather_metrics(proc: Arc) { - +async fn get_all_metrics( + system: &mut System, + sender: BusSender, + disks: &DisksList, + networks: &Networks, +) -> FullMetrics { + let host = get_host_info().await; 
+ let cpu = get_cpu_metrics(system).await; + let ram = get_ram_metrics(system).await; + let disks = get_all_disks_metrics(&disks).await; + let ifaces = get_all_ifaces_metrics(&networks).await; + let prcs: Vec = Vec::new(); + let _ = sender + .send(BusMessage::Request( + BusMessageDirection::ToSupervisor, + BusMessageContentType::ProcessQuery, + Box::new(ProcessesQuery::QueryGeneral), + )) + .await; + FullMetrics::create(host, cpu, ram, disks, ifaces, prcs) } -// DEPRECATED : for net monitoring -// async fn capture_packets(buffer: PacketBuffer) -> Result<()> { -// let mut cap = Capture::from_device(Device::lookup()?.unwrap())? -// .promisc(true) -// .open()?; - -// cap.filter("not broadcast and not multicast", true)?; - -// while let core::result::Result::Ok(packet) = cap.next_packet() { -// if let Some((src, dst, prot)) = get_packet_info(&packet.data).await { -// let packet_info = PacketInfo::new(String::from(prot), dst, src, packet.header.len as usize); -// let mut locked_buffer = buffer.lock().unwrap(); -// println!("{:?}", &packet_info); -// locked_buffer.push(packet_info); -// } -// } -// Ok(()) -// } -// async fn get_packet_info(data: &[u8]) -> Option<(Ipv4Addr, Ipv4Addr, &str)> { -// if data.len() >= 20 { -// let src_ip = Ipv4Addr::new(data[12], data[13], data[14], data[15]); -// let dst_ip = Ipv4Addr::new(data[16], data[17], data[18], data[19]); -// let protocol = match data[9] { -// 1 => "ICMP", -// 6 => "TCP", -// 17 => "UDP", -// _ => "Unknown", -// }; - -// Some((src_ip, dst_ip, protocol)) -// } else { -// None -// } -// } - - -/// # Fn `get_all_container_metrics` -/// ## for gathering all container (whole system metrics) -/// -/// *input* : `Arc`, `Arc>` -/// -/// *output* : `ContainerMetrics` -/// -/// *initiator* : main thread ?? 
-/// -/// *managing* : ref counter to `System` object, ref counter to list of processes -/// -/// *depends on* : `TrackingProcess` -/// -#[allow(dead_code)] -async fn get_all_container_metrics(sys: Arc, prcs: Arc>) -> ContainerMetrics { - let metrics = join!( - get_cpu_metrics_container(sys.clone()), - get_ram_metrics_container(sys.clone()), - get_subsystems(prcs.clone()) - ); - ContainerMetrics::new( - &get_container_id().unwrap_or(String::from("unknown")), - metrics.0, - metrics.1, - metrics.2 - ) +async fn get_global_host_info( + system: &mut System, + disks: &DisksList, + networks: &Networks, +) -> HostGeneral { + HostGeneral { + hostname: System::host_name().unwrap_or_default(), + os: System::long_os_version().unwrap_or_default(), + kernel: System::kernel_version().unwrap_or_default(), + cpu_percentage: system.global_cpu_usage(), + ram_available: system.total_memory() - system.free_memory(), + disk_percentage: { + let total = disks + .iter() + .map(|disk| disk.available_space() * 100 / disk.total_space()) + .collect::>(); + total.iter().sum::() / (total.len() as u64) + }, + net_stat: { + let total = networks + .iter() + .map(|(_, iface_data)| iface_data.received() + iface_data.transmitted()) + .collect::>(); + total.iter().sum::() / ((total.len() * 2) as u64) + }, + } } -/// # Fn `get_cpu_metrics_container` -/// ## for gathering container cpu metrics -/// -/// *input* : `Arc` -/// -/// *output* : `f32` -/// -/// *initiator* : main thread ?? 
-/// -/// *managing* : ref counter to `System` object -/// -/// *depends on* : - -/// -#[allow(dead_code)] -async fn get_cpu_metrics_container(sys: Arc) -> f32 { - sys.global_cpu_usage() +async fn get_host_info() -> HostInfo { + HostInfo { + hostname: System::host_name().unwrap_or_default(), + os: System::long_os_version().unwrap_or_default(), + kernel: System::kernel_version().unwrap_or_default(), + } } -/// # Fn `get_ram_metrics_container` -/// ## for gathering container ram metrics -/// -/// *input* : `Arc` -/// -/// *output* : `f32` -/// -/// *initiator* : main thread ?? -/// -/// *managing* : ref counter to `System` object -/// -/// *depends on* : - -/// -#[allow(dead_code)] -async fn get_ram_metrics_container(sys: Arc) -> f32 { - (sys.used_memory() / sys.total_memory()) as f32 * 100.0 -} -// async fn get_mem_metrics_container(sys: Arc) -> f32 { -// sys. -// } +async fn get_cpu_metrics(system: &mut System) -> Cpu { + let mut buffer = CoreUsage::new(); + let global_usage = system.global_cpu_usage(); -/// # Fn `get_subsystems` -/// ## for gathering info about container subsystems (processes) -/// -/// *input* : `Arc>` -/// -/// *output* : `Vec` -/// -/// *initiator* : main thread ?? -/// -/// *managing* : ref counter to list of `TrackingProcess` -/// -/// *depends on* : `TrackingProcess` -/// -#[allow(dead_code)] -async fn get_subsystems(prcs: Arc>) -> Vec { - prcs.iter().map(|process| process.name.clone()).collect() + system.cpus().iter().enumerate().for_each(|(id, cpu)| { + let core_info = CoreInfo { + // id, + brand: cpu.brand().to_string(), + name: cpu.name().to_string(), + frequency: cpu.frequency(), + vendor_id: cpu.vendor_id().to_string(), + usage: cpu.cpu_usage(), + }; + buffer.entry(id).or_insert(core_info); + }); + + Cpu { + global_usage, + usage: buffer, + } } -/// # Fn `get_all_metrics_process` -/// ## for gathering all process' metrics -/// -/// *input* : `Arc`, `Arc` -/// -/// *output* : `ProcessMetrics` -/// -/// *initiator* : main thread ?? 
-/// -/// *managing* : two ref counters to `Process` and `System` -/// -/// *depends on* : - -/// -#[allow(dead_code)] -async fn get_all_metrics_process(proc: Arc, sys: Arc) -> ProcessMetrics { - let metrics = join!( - get_cpu_metrics_process(proc.clone()), - get_ram_metrics_process(proc.clone(), sys.clone()) - ); - ProcessMetrics::new( - proc.name().to_str().unwrap_or("unknown"), - metrics.0, - metrics.1 - ) +async fn get_ram_metrics(system: &mut System) -> Ram { + Ram { + free_mem: system.free_memory(), + free_swap: system.free_swap(), + total_mem: system.total_memory(), + total_swap: system.total_swap(), + } } -/// # Fn `get_cpu_metrics_process` -/// ## for gathering process cpu metrics -/// -/// *input* : `Arc` -/// -/// *output* : `f32` -/// -/// *initiator* : main thread ?? -/// -/// *managing* : ref counter to `Process` object -/// -/// *depends on* : - -/// -async fn get_cpu_metrics_process(proc: Arc) -> f32 { - proc.cpu_usage() +async fn get_all_disks_metrics(disks: &DisksList) -> Disks { + // let disks = DisksList::new_with_refreshed_list(); + let mut buffer = Disks::new(); + disks.list().iter().for_each(|disk| { + let disk = Disk { + name: disk.name().to_string_lossy().into_owned(), + kind: disk.kind().to_string(), + fs: disk.file_system().to_string_lossy().into_owned(), + mount_point: disk.mount_point().to_string_lossy().into_owned(), + total_space: disk.total_space(), + available_space: disk.available_space(), + is_removable: disk.is_removable(), + is_readonly: disk.is_read_only(), + }; + buffer.push(disk); + }); + buffer } -/// # Fn `get_ram_metrics_process` -/// ## for gathering process ram metrics -/// -/// *input* : `Arc` -/// -/// *output* : `f32` -/// -/// *initiator* : main thread ?? 
-/// -/// *managing* : ref counter to `Process` object -/// -/// *depends on* : - -/// -async fn get_ram_metrics_process(proc: Arc, sys: Arc) -> f32 { - (proc.memory() as f64 / sys.total_memory() as f64) as f32 * 100.0 as f32 +async fn get_all_ifaces_metrics(networks: &Networks) -> Ifaces { + let mut ifaces = Ifaces::new(); + networks.iter().for_each(|(iface_name, data)| { + let mac = data.mac_address().to_string(); + let ip_addrs = data + .ip_networks() + .iter() + .map(|ipaddr| format!("{}/{}", ipaddr.addr, ipaddr.prefix)) + .collect::>(); + + let iface = Network { + iname: iface_name.to_owned(), + mac: mac, + ip_addresses: ip_addrs, + recieved: data.received(), + transmitted: data.transmitted(), + total_recieved_bytes: data.total_received(), + total_transmitted_bytes: data.total_transmitted(), + total_recieved_packets: data.total_packets_received(), + total_transmitted_packets: data.total_packets_transmitted(), + errors_on_recieved: data.errors_on_received(), + errors_on_transmitted: data.errors_on_transmitted(), + }; + ifaces.push(iface); + }); + ifaces +} + +pub mod processes { + use crate::options::structs::ProcessState; + use crate::utils::prcs::v2::Pid; + + #[derive(Debug, serde::Serialize)] + pub enum ProcessesQuery { + General(Vec), + All(Vec), + QueryGeneral, + QueryAll, + } + + #[derive(Debug, serde::Serialize)] + pub struct ProcessesGeneral { + pub name: String, + pub state: ProcessState, + pub pid: Pid, + } + #[derive(Debug, serde::Serialize)] + pub struct ProcessesAll { + pub name: String, + pub state: ProcessState, + pub pid: Pid, + pub dependencies: deps::Dependencies, + } + + pub mod deps { + use crate::options::structs::{FileTriggers, ServiceState, ServiceTriggers}; + use crate::utils::files::v2::FileState; + + // use super::*; + #[derive(Debug, serde::Serialize)] + pub struct FilesExtended { + pub name: String, + pub path: String, + pub status: FileState, + pub backup_file : String, + pub triggers: FileTriggers, + } + #[derive(Debug, 
serde::Serialize)] + pub struct ServicesExtended { + pub name: String, + pub access_name: String, + pub status: ServiceState, + pub triggers: ServiceTriggers, + } + + #[derive(Debug, serde::Serialize)] + pub struct Dependencies { + pub files: Vec, + pub services: Vec, + } + } +} + +pub trait MetricsExportable: Send + Sync + 'static + Debug + Any { + fn serialze_into_output(&self) -> anyhow::Result; +} + +#[derive(Serialize, Debug)] +struct FullMetrics { + hostname: String, + os: String, + kernel: String, + cpu: Cpu, + ram: Ram, + disks: Disks, + networks: Ifaces, + pub processes: Vec, +} +impl FullMetrics { + fn create( + host: HostInfo, + cpu: Cpu, + ram: Ram, + disks: Disks, + ifaces: Ifaces, + processes: Vec, + ) -> Self { + Self { + hostname: host.hostname, + os: host.os, + kernel: host.kernel, + cpu, + ram, + disks, + networks: ifaces, + processes, + } + } +} +impl MetricsExportable for FullMetrics { + fn serialze_into_output(&self) -> anyhow::Result { + Ok(serde_json::to_string_pretty(self)?) + } +} + +#[derive(Debug, Serialize)] +struct HostInfo { + hostname: String, + os: String, + kernel: String, +} + +impl MetricsExportable for HostInfo { + fn serialze_into_output(&self) -> anyhow::Result { + Ok(serde_json::to_string_pretty(self)?) + } +} + +#[derive(Debug, Serialize)] +struct HostGeneral { + hostname: String, + os: String, + kernel: String, + cpu_percentage: f32, + ram_available: u64, + disk_percentage: u64, + net_stat: u64, +} + +impl MetricsExportable for HostGeneral { + fn serialze_into_output(&self) -> anyhow::Result { + Ok(serde_json::to_string_pretty(self)?) + } +} + +#[derive(Serialize, Debug)] +struct Cpu { + global_usage: f32, + usage: CoreUsage, +} + +impl MetricsExportable for Cpu { + fn serialze_into_output(&self) -> anyhow::Result { + Ok(serde_json::to_string_pretty(self)?) 
+ } +} + +#[derive(Serialize, Debug)] +struct CoreInfo { + name: String, + brand: String, + frequency: u64, + vendor_id: String, + usage: f32, +} + +#[derive(Serialize, Debug)] +struct Ram { + free_mem: u64, + free_swap: u64, + total_mem: u64, + total_swap: u64, +} + +impl MetricsExportable for Ram { + fn serialze_into_output(&self) -> anyhow::Result { + Ok(serde_json::to_string_pretty(self)?) + } +} + +#[derive(Serialize, Debug)] +struct Disk { + name: String, + kind: String, + fs: String, + mount_point: String, + total_space: u64, + available_space: u64, + is_removable: bool, + is_readonly: bool, +} + +impl MetricsExportable for Disks { + fn serialze_into_output(&self) -> anyhow::Result { + Ok(serde_json::to_string_pretty(self)?) + } +} + +// vec +#[derive(Serialize, Debug)] +struct Network { + iname: String, + mac: String, + ip_addresses: Vec, + recieved: u64, + transmitted: u64, + total_recieved_bytes: u64, + total_transmitted_bytes: u64, + total_recieved_packets: u64, + total_transmitted_packets: u64, + errors_on_recieved: u64, + errors_on_transmitted: u64, +} + +impl MetricsExportable for Ifaces { + fn serialze_into_output(&self) -> anyhow::Result { + Ok(serde_json::to_string_pretty(self)?) 
+ } +} + +#[derive(Serialize, Debug)] +pub struct ProcessExtended { + name: String, + status: ProcessState, + pid: Pid, + start_time: String, + duration: String, + dependencies: processes::deps::Dependencies, + cpu_usage: f32, + ram_usage: u64, + virtual_mem_usage: u64, + disks_usage_read_bytes: u64, + disks_usage_write_bytes: u64, +} + +impl ProcessExtended { + pub fn from_process_query_all(system: &mut System, proc: processes::ProcessesAll) -> Self { + system.refresh_processes(sysinfo::ProcessesToUpdate::All, true); + return if let Some(prc) = system.process(proc.pid.new_sysinfo_pid()) { + let disk_usage = prc.disk_usage(); + let duration = chrono::Duration::new(prc.run_time() as i64, 0); + let start_time = chrono::DateTime::from_timestamp(prc.start_time() as i64, 0); + Self { + name: proc.name, + status: proc.state, + pid: proc.pid, + start_time : { + match start_time { + Some(date) => date.to_string(), + None => String::new() + } + }, + duration: { + match duration { + Some(duration) => { + format!("{}:{}:{}:{}", + duration.num_days(), + duration.num_hours() % 24, + duration.num_minutes() % 60, + duration.num_seconds() % 60 + ) + }, + None => String::new() + } + }, + dependencies: proc.dependencies, + cpu_usage: prc.cpu_usage(), + ram_usage: prc.memory(), + virtual_mem_usage: prc.virtual_memory(), + disks_usage_read_bytes: disk_usage.read_bytes, + disks_usage_write_bytes: disk_usage.written_bytes, + } + } else { + Self { + name: proc.name, + status: proc.state, + pid: proc.pid, + start_time : String::new(), + duration: String::new(), + dependencies: proc.dependencies, + cpu_usage: 0.0, + ram_usage: 0, + virtual_mem_usage: 0, + disks_usage_read_bytes: 0, + disks_usage_write_bytes: 0, + } + }; + } +} + +impl MetricsExportable for MetricProcesses { + fn serialze_into_output(&self) -> anyhow::Result { + Ok(serde_json::to_string_pretty(self)?) 
+ } } #[cfg(test)] diff --git a/noxis-rs/src/utils/prcs.rs b/noxis-rs/src/utils/prcs.rs index 3f10903..d6daf6e 100644 --- a/noxis-rs/src/utils/prcs.rs +++ b/noxis-rs/src/utils/prcs.rs @@ -1,24 +1,46 @@ +use crate::options::structs::{Events, NegativeOutcomes, ProcessState, ProcessUnit}; +use async_trait::async_trait; use log::{error, warn}; +use serde::Serialize; +use std::collections::HashSet; use std::process::{Command, Output}; use std::sync::Arc; -use tokio::time::Duration; -use crate::options::structs::{ProcessState, Events, NegativeOutcomes, ProcessUnit}; -use std::collections::HashSet; use tokio::sync::mpsc::Receiver as MpscReciever; -use async_trait::async_trait; +use tokio::time::Duration; pub mod v2 { - use log::info; use crate::options::structs::DependencyType; + use crate::utils::metrics::processes::ProcessesGeneral; + use log::info; use std::path::Path; + use tokio::time::sleep; use super::*; - + + #[derive(Debug, Serialize, Clone, Copy)] + pub struct Pid(u32); + + impl std::fmt::Display for Pid { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + return write!(f, "{}", self.0); + } + } + + impl Pid { + fn new() -> Self { + Pid(0) + } + #[allow(unused)] + pub fn new_sysinfo_pid(&self) -> sysinfo::Pid { + sysinfo::Pid::from_u32(self.0 as u32) + } + } + #[derive(Debug)] pub struct ProcessesController { - name: Arc, + pub name: Arc, + pid: Pid, bin: String, - // obj: Arc, state: ProcessState, event_reader: MpscReciever, negative_events: HashSet>, @@ -31,72 +53,259 @@ pub mod v2 { } impl ProcessesController { + #[inline(always)] pub fn new(name: &str, event_reader: MpscReciever) -> ProcessesController { ProcessesController { - name : Arc::from(name), - bin : String::new(), - state : ProcessState::Stopped, + name: Arc::from(name), + pid: Pid::new(), + bin: String::new(), + state: ProcessState::Stopped, event_reader, - negative_events : HashSet::new(), + negative_events: HashSet::new(), } } + #[inline(always)] pub fn with_exe(mut self, bin: 
impl AsRef) -> ProcessesController { self.bin = bin.as_ref().to_string_lossy().into_owned(); self } - + pub fn get_pid(&self) -> Pid { + self.pid + } + pub fn get_state(&self) -> ProcessState { + self.state + } async fn trigger_on(&mut self, dep_name: &str, trigger: &str, dep_type: DependencyType) { match trigger { "stay" => { - info!("Event on {} `{}` for {}. Ignoring ...", dep_type, dep_name, self.name); - }, + info!( + "Event on {} `{}` for {}. Ignoring ...", + dep_type, dep_name, self.name + ); + } "stop" => { if is_active(&self.name).await { - info!("Event on {} `{}` for {}. Stopping ...", dep_type, dep_name, self.name); - terminate_process(&self.name).await; - self.state = ProcessState::Stopped; + info!( + "Event on {} `{}` for {}. Stopping ...", + dep_type, dep_name, self.name + ); + match terminate_process(&self.name).await { + Ok(_) => { + info!("Process {} was stopped ...", &self.name); + self.state = ProcessState::Stopped; + self.pid = Pid::new(); + } + Err(er) => { + error!("Cannot stop process {} : {}", self.name, er); + } + } } - }, + } + "user-stop" => { + if is_active(&self.name).await { + info!( + "Event on {} `{}` for {}. Stopping ...", + dep_type, "User Stop Call", self.name + ); + match terminate_process(&self.name).await { + Ok(_) => { + info!("Process {} was forcefully stopped ...", &self.name); + self.state = ProcessState::StoppedByCli; + self.pid = Pid::new(); + } + Err(er) => { + error!("Cannot forcefully stop process {} : {}", self.name, er); + } + } + } + } + "user-hold" => { + if is_active(&self.name).await { + info!( + "Event on {} `{}` for {}. 
Stopping ...", + dep_type, "User Hold Call", self.name + ); + match freeze_process(&self.name).await { + Ok(_) => { + info!("Process {} was forcefully frozen ...", &self.name); + self.state = ProcessState::HoldingByCli; + } + Err(er) => { + error!("Cannot forcefully freeze process {} : {}", self.name, er); + } + } + } + } "hold" => { if !is_frozen(&self.name).await { - info!("Event on {} `{}` for {}. Freezing ...", dep_type, dep_name, self.name); - freeze_process(&self.name).await; - self.state = ProcessState::Holding; + info!( + "Event on {} `{}` for {}. Freezing ...", + dep_type, dep_name, self.name + ); + match freeze_process(&self.name).await { + Ok(_) => { + info!("Process {} was frozen ...", &self.name); + self.state = ProcessState::Holding; + } + Err(er) => { + error!("Cannot freeze process {} : {}", self.name, er); + } + } } - }, + } "restart" => { - info!("Event on {} `{}` for {}. Restarting ...", dep_type, dep_name, self.name); - let _ = restart_process(&self.name, &self.bin).await; - }, - _ => error!("Impermissible trigger in file-trigger for {}. Ignoring event ...", self.name), + info!( + "Event on {} `{}` for {}. Restarting ...", + dep_type, dep_name, self.name + ); + let pid = restart_process(&self.name, &self.bin).await; + sleep(Duration::from_millis(100)).await; + if let Ok(pid) = pid { + self.pid = Pid(pid); + info!("{}: New PID - {}", self.name, self.pid); + } + } + _ => error!( + "Impermissible trigger in file-trigger for {}. 
Ignoring event ...", + self.name + ), } tokio::time::sleep(Duration::from_micros(100)).await; } + #[allow(unused)] + pub async fn stop_by_user_call(&mut self) -> anyhow::Result<()> { + terminate_process(&self.name).await?; + warn!("Process {} was stopped by user call ...", self.name); + self.state = ProcessState::StoppedByCli; + self.pid = Pid::new(); + Ok(()) + } + #[allow(unused)] + pub async fn freeze_by_user_call(&mut self) -> anyhow::Result<()> { + freeze_process(&self.name).await?; + warn!("Process {} was frozen by user call ...", self.name); + self.state = ProcessState::HoldingByCli; + Ok(()) + } + #[allow(unused)] + pub async fn start_by_user_call(&mut self) -> anyhow::Result<()> { + if self.negative_events.is_empty() { + let pid = start_process(&self.name, &self.bin).await?; + warn!("Process {} was started by user call ...", self.name); + self.state = ProcessState::Pending; + self.pid = Pid(pid); + return Ok(()); + } else { + warn!("Attempt to start process {} by user call was stopped due to existance of negative incidents ...", self.name); + return Err(anyhow::Error::msg( + format!("Attempt to start process {} by user call was stopped due to existance of negative incidents ...", self.name) + )); + } + } + #[allow(unused)] + pub async fn unfreeze_by_user_call(&mut self) -> anyhow::Result<()> { + if self.negative_events.is_empty() { + unfreeze_process(&self.name).await?; + warn!("Process {} was unfrozen by user call ...", self.name); + self.state = ProcessState::Pending; + Ok(()) + } else { + warn!("Attempt to unfreeze process {} by user call was stopped due to existance of negative incidents ...", self.name); + return Err(anyhow::Error::msg( + format!("Attempt to unfreeze process {} by user call was stopped due to existance of negative incidents ...", self.name) + )); + } + } + #[allow(unused)] + pub async fn restart_by_user_call(&mut self) -> anyhow::Result<()> { + let pid = restart_process(&self.name, &self.bin).await?; + warn!("Process {} was restarted 
by user call ...", self.name); + self.pid = Pid(pid); + Ok(()) + } + + pub async fn get_general_info(&self) -> ProcessesGeneral { + ProcessesGeneral { + name: self.name.to_string(), + state: self.state, + pid: self.pid, + } + } } #[async_trait] impl ProcessUnit for ProcessesController { async fn process(&mut self) { if self.negative_events.len() == 0 { - match self.state { - ProcessState::Holding => { - info!("No negative dependecies events on {} process. Unfreezing ...", self.name); + let conditions = (is_active(&self.name).await, is_frozen(&self.name).await); + let state = &self.state; + match (state, conditions) { + (ProcessState::Holding, (_, _)) => { + info!( + "No negative dependecies events on {} frozen process. Unfreezing ...", + self.name + ); + if let Err(er) = unfreeze_process(&self.name).await { + if er.to_string().contains("already") { + self.state = ProcessState::Pending; + } else { + error!("Cannot unfreeze process {} : {}", self.name, er); + } + } else { + self.state = ProcessState::Pending; + info!("Process {} was unfreezed", &self.name); + } + } + (ProcessState::Stopped, (_, _)) => { + info!( + "No negative dependecies events on stopped {} process. Starting ...", + self.name + ); + match start_process(&self.name, &self.bin).await { + Ok(pid) => { + self.state = ProcessState::Pending; + self.pid = Pid(pid); + info!("{}: New PID - {}", self.name, self.pid); + } + Err(er) => { + if er.to_string().contains("already") { + self.state = ProcessState::Pending; + } else { + error!("Cannot start process {} : {}", self.name, er); + } + } + } + } + (ProcessState::Pending, (false, false)) => { + info!( + "{} process was impermissibly stopped. 
Starting ...", + self.name + ); + match start_process(&self.name, &self.bin).await { + Ok(pid) => { + self.state = ProcessState::Pending; + self.pid = Pid(pid); + info!("{}: New PID - {}", self.name, self.pid); + } + Err(er) => { + error!("Cannot start process {} : {}", self.name, er); + } + } + } + (ProcessState::Pending, (true, true)) => { + info!( + "No negative dependecies events on {} process. Unfreezing ...", + self.name + ); if let Err(er) = unfreeze_process(&self.name).await { error!("Cannot unfreeze process {} : {}", self.name, er); } else { self.state = ProcessState::Pending; + info!("Process {} was unfreezed", &self.name); } - }, - ProcessState::Stopped => { - info!("No negative dependecies events on {} process. Starting ...", self.name); - if let Err(er) = start_process(&self.name, &self.bin).await { - error!("Cannot start process {} : {}", self.name, er); - } else { - self.state = ProcessState::Pending; - } - }, - _ => {}, - } + } + _ => {} + } } while let Ok(event) = self.event_reader.try_recv() { match event { @@ -104,22 +313,16 @@ pub mod v2 { if self.negative_events.contains(&target) { self.negative_events.remove(&target); } - }, - Events::Negative(event) => { - match event { - NegativeOutcomes::FileWasChanged(target, dep_type, trigger) | - NegativeOutcomes::FileWasMovedOrDeleted(target, dep_type, trigger) | - NegativeOutcomes::ServiceIsUnreachable(target, dep_type, trigger) => { - if !self.negative_events.contains(&target) { - self.negative_events.insert(target.clone()); - - self.trigger_on( - &target, - &trigger, - dep_type - ).await; - } - }, + } + Events::Negative(event) => match event { + NegativeOutcomes::FileWasChanged(target, dep_type, trigger) + | NegativeOutcomes::FileWasMovedOrDeleted(target, dep_type, trigger) + | NegativeOutcomes::ServiceIsUnreachable(target, dep_type, trigger) => { + if !self.negative_events.contains(&target) { + self.negative_events.insert(target.clone()); + + self.trigger_on(&target, &trigger, dep_type).await; + } } 
}, } @@ -130,7 +333,7 @@ pub mod v2 { /// # Fn `get_pid` /// ## for initializing process of unstoppable grubbing metrics. -/// +/// /// *input* : `&str` /// /// *output* : `None` if cant get process PID | `Some(Output)` on success @@ -139,8 +342,8 @@ pub mod v2 { /// /// *managing* : process name /// -/// *depends on* : - -/// +/// *depends on* : - +/// pub async fn get_pid(name: &str) -> Option { let name = Arc::new(name.to_string()); let res = @@ -159,7 +362,7 @@ pub async fn get_pid(name: &str) -> Option { /// # Fn `is_active` /// ## for checking process's activity state -/// +/// /// *input* : `&str` /// /// *output* : `true` if process running | `false` if not @@ -168,8 +371,8 @@ pub async fn get_pid(name: &str) -> Option { /// /// *managing* : process name /// -/// *depends on* : - -/// +/// *depends on* : - +/// pub async fn is_active(name: &str) -> bool { let arc_name = Arc::new(name.to_string()); tokio::task::spawn_blocking(move || { @@ -188,7 +391,7 @@ pub async fn is_active(name: &str) -> bool { /// # Fn `is_frozen` /// ## for checking process's hibernation state -/// +/// /// *input* : `&str` /// /// *output* : `true` if process is frozen | `false` if not @@ -198,7 +401,7 @@ pub async fn is_active(name: &str) -> bool { /// *managing* : process name /// /// *depends on* : fn `get_pid` -/// +/// pub async fn is_frozen(name: &str) -> bool { let temp: Output; if let Some(output) = get_pid(name).await { @@ -229,7 +432,7 @@ pub async fn is_frozen(name: &str) -> bool { /// # Fn `terminate_process` /// ## for stop current process -/// +/// /// *input* : `&str` /// /// *output* : () @@ -239,20 +442,21 @@ pub async fn is_frozen(name: &str) -> bool { /// *managing* : process name /// /// *depends on* : - -/// -pub async fn terminate_process(name: &str) { - let _ = Command::new("pkill") - .arg(name) - .output() - .unwrap_or_else(|_| { - error!("Failed to execute command 'pkill'"); - std::process::exit(101); - }); +/// +pub async fn terminate_process(name: &str) -> 
anyhow::Result<()> { + if !is_active(name).await { + return Err(anyhow::Error::msg(format!( + "Process {} is already stopped", + name + ))); + } + let _ = Command::new("pkill").arg(name).output()?; + Ok(()) } /// # Fn `terminate_process` /// ## for freeze/hibernate current process -/// +/// /// *input* : `&str` /// /// *output* : () @@ -262,20 +466,15 @@ pub async fn terminate_process(name: &str) { /// *managing* : process name /// /// *depends on* : - -/// -pub async fn freeze_process(name: &str) { - let _ = Command::new("pkill") - .args(["-STOP", name]) - .output() - .unwrap_or_else(|_| { - error!("Failed to freeze process"); - std::process::exit(101); - }); +/// +pub async fn freeze_process(name: &str) -> anyhow::Result<()> { + let _ = Command::new("pkill").args(["-STOP", name]).output()?; + Ok(()) } /// # Fn `unfreeze_process` /// ## for unfreeze/hibernate current process -/// +/// /// *input* : `&str` /// /// *output* : () @@ -285,17 +484,15 @@ pub async fn freeze_process(name: &str) { /// *managing* : process name /// /// *depends on* : - -/// +/// pub async fn unfreeze_process(name: &str) -> anyhow::Result<()> { - let _ = Command::new("pkill") - .args(["-CONT", name]) - .output()?; + let _ = Command::new("pkill").args(["-CONT", name]).output()?; Ok(()) } /// # Fn `restart_process` /// ## for restarting current process -/// +/// /// *input* : `&str`, &str /// /// *output* : () @@ -305,16 +502,16 @@ pub async fn unfreeze_process(name: &str) -> anyhow::Result<()> { /// *managing* : process name and path to its exec file /// /// *depends on* : fn `start_process`, fn `terminate_process` -/// -pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<()> { - terminate_process(name).await; +/// +pub async fn restart_process(name: &str, path: &str) -> anyhow::Result { + terminate_process(name).await?; tokio::time::sleep(Duration::from_millis(100)).await; start_process(name, path).await } /// # Fn `start_process` /// ## for starting current process -/// 
+/// /// *input* : `&str`, &str /// /// *output* : () @@ -324,20 +521,27 @@ pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<()> { /// *managing* : process name and path to its exec file /// /// *depends on* : - -/// -pub async fn start_process(name: &str, path: &str) -> anyhow::Result<()> { - // let runsh = format!("{} {}", "exec", path); +/// +pub async fn start_process(name: &str, path: &str) -> anyhow::Result { + if is_active(name).await { + return Err(anyhow::Error::msg(format!( + "Process {} is already running", + name + ))); + } let mut command = Command::new(path); // command.arg(path); match command.spawn() { - Ok(_) => { + Ok(child) => { + let pid = child.id(); warn!("Process {} is running now!", name); - Ok(()) - } - Err(er) => { - Err(anyhow::Error::msg(format!("Cannot start process {} due to {}", name, er))) + Ok(pid) } + Err(er) => Err(anyhow::Error::msg(format!( + "Cannot start process {} : {}", + name, er + ))), } } @@ -356,8 +560,7 @@ mod process_unittests { // let _ = std::io::stdout().write_all(b""); let res1 = start_process("restart-prc", "./tests/examples/restart-prc").await; assert!(res1.is_ok()); - let res2 = - restart_process("restart-prc", "./tests/examples/restart-prc").await; + let res2 = restart_process("restart-prc", "./tests/examples/restart-prc").await; assert!(res2.is_ok()); let _ = terminate_process("restart-prc").await; let res3 = is_active("restart-prc").await; @@ -384,6 +587,7 @@ mod process_unittests { let res1 = start_process("freeze-check", "./tests/examples/freeze-check").await; assert!(res1.is_ok()); assert!(!is_frozen("freeze-check").await); + let _ = terminate_process("freeze-check").await; } #[tokio::test] async fn pidof_active_process() { diff --git a/noxis-rs/src/utils/services.rs b/noxis-rs/src/utils/services.rs index a381cb1..c24171c 100644 --- a/noxis-rs/src/utils/services.rs +++ b/noxis-rs/src/utils/services.rs @@ -1,18 +1,20 @@ -use crate::options::structs::CustomError; -use log::{error, 
warn}; -use std::net::{TcpStream, ToSocketAddrs}; -use std::sync::Arc; -use tokio::time::Duration; -use tokio::sync::mpsc::Sender as Sender; use async_trait::async_trait; +use futures::future::Future; +use log::{error, warn}; +use std::net::ToSocketAddrs; +use std::pin::Pin; +use std::sync::Arc; +use tokio::sync::mpsc::Sender; +use tokio::time::Duration; pub mod v2 { + use futures::FutureExt; use log::info; - use crate::options::structs::{Triggers, ProcessUnit, Events, ServiceState}; + use crate::options::structs::{Events, ProcessUnit, ServiceState, Triggers}; use super::*; - use std::collections::{HashMap, BTreeMap, VecDeque}; + use std::collections::{BTreeMap, HashMap, VecDeque}; type MpscSender = Arc>; // type EventHandlers<'a> = Vec>>; @@ -24,45 +26,43 @@ pub mod v2 { pub struct ServicesController { // i.e. yandex.ru #[allow(unused)] - name : String, + name: String, // i.e. yandex.ru:443 - access_url : Arc, + access_url: Arc, // "OK" or "Unavailable" state: ServiceState, // btree map with key as max wait time and it's key to hashmap config: ConnectionQueue, // Map of processes with their (trigger and mpsc sender) - event_registrator : EventHandlers, + event_registrator: EventHandlers, } impl PartialEq for ServicesController { fn eq(&self, other: &Self) -> bool { - self.access_url == other.access_url + self.access_url == other.access_url } } impl ServicesController { + #[inline(always)] pub fn new() -> ServicesController { ServicesController { - name : String::new(), - access_url : Arc::from(String::new()), - state : ServiceState::Unavailable, + name: String::new(), + access_url: Arc::from(String::new()), + state: ServiceState::Ok, config: ConnectionQueue::new(), - event_registrator : EventHandlers::new(), + event_registrator: EventHandlers::new(), } } - pub fn with_access_name( - mut self, - hostname: &str, - access_url: &str, - ) -> ServicesController { + #[inline(always)] + pub fn with_access_name(mut self, hostname: &str, access_url: &str) -> 
ServicesController { self.name = hostname.to_string(); self.access_url = Arc::from(access_url); self } - + #[inline(always)] pub fn with_params( - mut self, + mut self, conn_queue: ConnectionQueue, event_reg: EventHandlers, ) -> ServicesController { @@ -72,99 +72,185 @@ pub mod v2 { } pub fn get_access_url(hostname: &str, port: Option<&u32>) -> String { - format!("{}{}", hostname, port.map_or_else(|| "".to_string(), |p| format!(":{}", p))) + format!( + "{}{}", + hostname, + port.map_or_else(|| "".to_string(), |p| format!(":{}", p)) + ) } - pub fn add_process( - &mut self, - proc_name: &str, - trigger: Triggers, - sender: MpscSender, - ) { + pub fn get_state(&self) -> ServiceState { + self.state + } + pub fn add_process(&mut self, proc_name: &str, trigger: Triggers, sender: MpscSender) { let proc_name: Arc = Arc::from(proc_name); // queue add if let Triggers::Service { wait, .. } = trigger { - self.config.entry(wait) - .and_modify(|el| el.push_back(proc_name.clone())) - .or_insert({ - let mut temp = VecDeque::new(); - temp.push_back(proc_name.clone()); - temp - }); + self.config + .entry(wait) + .and_modify(|el| el.push_back(proc_name.clone())) + .or_insert({ + let mut temp = VecDeque::new(); + temp.push_back(proc_name.clone()); + temp + }); } // event add - self.event_registrator.entry(proc_name).or_insert((trigger, sender)); + self.event_registrator + .entry(proc_name) + .or_insert((trigger, sender)); } async fn check_state(&self) -> anyhow::Result<()> { - let mut addrs = self.access_url.to_socket_addrs()?; - if !addrs.any(|a| TcpStream::connect_timeout(&a, Duration::new(1, 0)).is_ok()) { - return Err(anyhow::Error::msg(format!("No access to service `{}`", &self.access_url))) + let url = self.access_url.clone(); + let resolve_future = tokio::task::spawn_blocking(move || url.to_socket_addrs()); + let addrs: Vec<_> = + match tokio::time::timeout(Duration::from_secs(1), resolve_future).await { + Ok(Ok(addrs)) => addrs?.collect(), + Ok(Err(er)) => return 
Err(er.into()), + Err(_) => return Err(anyhow::Error::msg("DNS resolution timeout")), + }; + + if addrs.is_empty() { + return Err(anyhow::Error::msg("No addresses resolved")); } + + let tasks: Vec<_> = addrs + .into_iter() + .map(|addr| async move { + match tokio::time::timeout( + Duration::from_secs(2), + tokio::net::TcpStream::connect(&addr), + ) + .await + { + Ok(Ok(_)) => Some(addr), + _ => None, + } + }) + .collect(); + let mut any_success = false; + for task in futures::future::join_all(tasks).await { + if task.is_some() { + any_success = true; + break; + } + } + if !any_success { + return Err(anyhow::Error::msg(format!( + "No access to service `{}`", + &self.access_url + ))); + } + Ok(()) } async fn trigger_on(&mut self) { match self.state { ServiceState::Ok => { - let _ = self.event_registrator - .iter() - .map(|(_, (_, el))| async { - let _ = el.send(Events::Positive(self.access_url.clone())).await; - }); - }, + let futures: Vec + Send>>> = self + .event_registrator + .iter() + .map(|(prc, (_, sender_opt))| (prc, (self.access_url.clone(), sender_opt))) + .map(|(prc, (serv, sender_opt))| async move { + info!("Notifying process {} ...", prc); + let _ = sender_opt.send(Events::Positive(serv.clone())).await; + }) + .map(|fut| fut.boxed()) + .collect(); + + futures::future::join_all(futures).await; + } ServiceState::Unavailable => { // looped check and notifying self.looped_check().await; - }, + } } } async fn looped_check(self: &mut Self) { let longest = self.config.last_entry().unwrap(); let longest = longest.key(); let mut interapter = tokio::time::interval(tokio::time::Duration::from_secs(1)); - let timer = tokio::time::Instant::now(); + let timer = tokio::time::Instant::now(); let mut attempt: u32 = 1; let access_url = Arc::new(self.access_url.clone()); - // let event_registrator = &mut self.event_registrator; - if let Err(_) = tokio::time::timeout(tokio::time::Duration::from_secs((longest + 1) as u64), async { - // let access_url = access_url.clone(); - 
loop { - interapter.tick().await; - info!("Trying to connect to {} (attempt: {}) ...", &access_url, attempt); - attempt += 1; + if let Err(_) = tokio::time::timeout( + tokio::time::Duration::from_secs((longest + 1) as u64), + async { + // let access_url = access_url.clone(); + loop { + interapter.tick().await; + info!( + "Trying to connect to {} (attempt: {}) ...", + &access_url, attempt + ); + attempt += 1; - let state_check_result = self.check_state().await; + let state_check_result = self.check_state().await; - if state_check_result.is_ok() { - info!("Connection to {} is `OK` now", &access_url); - self.state = ServiceState::Ok; - break; - } else { - let now = timer.elapsed(); - let iterator = self.config.iter() - .filter(|(&a, _)| tokio::time::Duration::from_secs(a as u64) <= now) - .flat_map(|(_, a)| a.iter().cloned()) - .collect::>>(); + if state_check_result.is_ok() { + info!("Connection to {} is `OK` now", &access_url); + self.state = ServiceState::Ok; + let futures: Vec + Send>>> = self + .event_registrator + .iter() + .map(|(prc, (_, sender_opt))| { + (prc, (self.access_url.clone(), sender_opt)) + }) + .map(|(prc, (serv, sender_opt))| async move { + info!("Notifying process {} ...", prc); + let _ = sender_opt.send(Events::Positive(serv.clone())); + }) + .map(|fut| fut.boxed()) + .collect(); - for name in iterator { - let proc_name = name.to_string(); - info!("Trying to notify process `{}` ...", &proc_name); - let sender_opt = self.event_registrator.get(&name) - .map(|(trigger, sender)| - (trigger.to_service_negative_event(name.clone()), sender) - ); + futures::future::join_all(futures).await; + break; + } else { + let now = timer.elapsed(); - if let Some((tr, tx)) = sender_opt { - let _ = tx.send(tr.unwrap()).await; - } else { - error!("Cannot find {} channel sender in {} service", name.clone(), &self.access_url) + let iterator = self + .config + .iter() + .filter(|(&wait, _)| { + tokio::time::Duration::from_secs(wait as u64) <= now + }) + .flat_map(|(_, 
a)| a.iter().cloned()) + .collect::>>(); + + for name in iterator { + let proc_name = name.to_string(); + info!("Trying to notify process `{}` ...", &proc_name); + let sender_opt = + self.event_registrator.get(&name).map(|(trigger, sender)| { + ( + trigger + .to_service_negative_event(self.access_url.clone()), + sender, + ) + }); + + if let Some((tr, tx)) = sender_opt { + let _ = tx.send(tr.unwrap()).await; + } else { + error!( + "Cannot find {} channel sender in {} service", + name.clone(), + &self.access_url + ) + } } } } - } - }).await { + }, + ) + .await + { info!("Timeout of establishing connection to {}. ", &access_url); } } + pub fn get_arc_access_url(&self) -> Arc { + self.access_url.clone() + } } #[async_trait] impl ProcessUnit for ServicesController { @@ -176,181 +262,30 @@ pub mod v2 { warn!("Connection with `{}` service was established. Notifying {} process(es) ...", &self.access_url, &self.event_registrator.len()); self.state = ServiceState::Ok; self.trigger_on().await; - }, + } (ServiceState::Ok, Err(_)) => { - warn!("Unreachable for connection service `{}`. Notifying {} process(es) ...", &self.access_url, &self.event_registrator.len()); + warn!("Unreachable for connection service `{}`. 
Initializing reconnect mechanism ...", &self.access_url); self.state = ServiceState::Unavailable; - self.trigger_on().await; - }, - (ServiceState::Unavailable, Err(_)) => warn!("Service {} is still unreachable", &self.access_url), - _ => { /* DEAD END WITH NO INTEREST */ }, + self.trigger_on().await; + } + (ServiceState::Unavailable, Err(_)) => { + warn!("Service {} is still unreachable", &self.access_url) + } + _ => { /* DEAD END WITH NO INTEREST */ } } } } } -/// # Fn `service_handler` -/// ## function to realize mechanism of current process' dep services monitoring -/// -/// *input* : `&str`, `&Vec`, `Arc>` -/// -/// *output* : () -/// -/// *initiator* : fn `utils::running_handler` -/// -/// *managing* : process name, ref of vec of dep services, ref counter to managing channel writer -/// -/// *depends on* : fn `check_service`, fn `utils::prcs::is_active`, fn `utils::prcs::is_frozen`, fn `looped_service_connecting` -/// -// pub async fn service_handler( -// name: &str, -// services: &Vec, -// tx: Arc>, -// ) -> Result<(), CustomError> { -// // println!("service daemon on {}", name); -// for serv in services { -// if check_service(&serv.hostname, &serv.port).await.is_err() { -// if !is_active(name).await || is_frozen(name).await { -// return Err(CustomError::Fatal); -// } -// error!( -// "Service {}:{} is unreachable for process {}", -// &serv.hostname, &serv.port, &name -// ); -// match serv.triggers.on_lost.as_str() { -// "stay" => { -// tx.send(4).await.unwrap(); -// continue; -// } -// "stop" => { -// if looped_service_connecting(name, serv).await.is_err() { -// tx.send(5).await.unwrap(); -// tokio::task::yield_now().await; -// return Err(CustomError::Fatal); -// } -// } -// "hold" => { -// // if is_frozen(name).await { -// // return Err(CustomError::Fatal); -// // } -// if looped_service_connecting(name, serv).await.is_err() { -// tx.send(6).await.unwrap(); -// tokio::task::yield_now().await; -// return Err(CustomError::Fatal); -// } -// } -// _ => { -// 
tx.send(101).await.unwrap(); -// return Err(CustomError::Fatal); -// } -// } -// } -// } -// tokio::time::sleep(Duration::from_millis(100)).await; -// Ok(()) -// } - -/// # Fn `looped_service_connecting` -/// ## for service's state check in loop (with delay and restriction of attempts) -/// -/// *input* : `&str`, `&Services` -/// -/// *output* : Ok(()) if service now available | Err(er) if still not -/// -/// *initiator* : fn `service_handler` -/// -/// *managing* : process name, current service struct -/// -/// *depends on* : fn `check_service` -/// -// async fn looped_service_connecting(name: &str, serv: &Services) -> Result<(), CustomError> { -// if serv.triggers.wait == 0 { -// loop { -// tokio::time::sleep(Duration::from_secs(serv.triggers.delay.into())).await; -// warn!( -// "Attempting to connect from {} process to {}:{}", -// &name, &serv.hostname, &serv.port -// ); -// match check_service(&serv.hostname, &serv.port).await { -// Ok(_) => { -// log::info!( -// "Successfully connected to {} from {} process!", -// &serv.hostname, -// &name -// ); -// break; -// } -// Err(_) => { -// tokio::task::yield_now().await; -// } -// } -// } -// Ok(()) -// } else { -// let start = Instant::now(); -// while start.elapsed().as_secs() < serv.triggers.wait.into() { -// tokio::time::sleep(Duration::from_secs(serv.triggers.delay.into())).await; -// warn!( -// "Attempting to connect from {} process to {}:{}", -// &name, &serv.hostname, &serv.port -// ); -// match check_service(&serv.hostname, &serv.port).await { -// Ok(_) => { -// log::info!( -// "Successfully connected to {} from {} process!", -// &serv.hostname, -// &name -// ); -// return Ok(()); -// } -// Err(_) => { -// tokio::task::yield_now().await; -// } -// } -// } -// Err(CustomError::Fatal) -// } -// } - -/// # Fn `check_service` -/// ## for check current service's availiability -/// -/// *input* : `&str`, `&u32` -/// -/// *output* : Ok(()) if service now available | Err(er) if still not -/// -/// *initiator* : fn 
`service_handler`, fn `looped_service_connecting` -/// -/// *managing* : hostname, port -/// -/// *depends on* : - -/// -// ! have to be rewritten -// todo: rewrite use -async fn check_service(hostname: &str, port: &u32) -> Result<(), CustomError> { - let addr = format!("{}:{}", hostname, port); - - match addr.to_socket_addrs() { - Ok(mut addrs) => { - if addrs.any(|a| TcpStream::connect_timeout(&a, Duration::new(1, 0)).is_ok()) { - Ok(()) - } else { - Err(CustomError::Fatal) - } - } - Err(_) => Err(CustomError::Fatal), - } -} - #[cfg(test)] mod service_unittests { - use super::check_service; - #[tokio::test] - async fn check_available_service() { - assert!(check_service("ya.ru", &443).await.is_ok()); - } - #[tokio::test] - async fn check_unavailable_service() { - assert!(check_service("unavailable.service", &1111).await.is_err()); - } + // use super::check_service; + // #[tokio::test] + // async fn check_available_service() { + // assert!(check_service("ya.ru", &443).await.is_ok()); + // } + // #[tokio::test] + // async fn check_unavailable_service() { + // assert!(check_service("unavailable.service", &1111).await.is_err()); + // } } diff --git a/noxis-rs/tests/examples/none.json b/noxis-rs/tests/examples/none.json index e6c4897..224fb83 100644 --- a/noxis-rs/tests/examples/none.json +++ b/noxis-rs/tests/examples/none.json @@ -1,5 +1,4 @@ { "dateOfCreation": "1", - "configServer": "", "processes": [] } diff --git a/noxis-rs/tests/examples/save-conf.json b/noxis-rs/tests/examples/save-conf.json index e6c4897..224fb83 100644 --- a/noxis-rs/tests/examples/save-conf.json +++ b/noxis-rs/tests/examples/save-conf.json @@ -1,5 +1,4 @@ { "dateOfCreation": "1", - "configServer": "", "processes": [] } diff --git a/noxis-rs/tests/examples/settings.json b/noxis-rs/tests/examples/settings.json index 8c80261..324275d 100644 --- a/noxis-rs/tests/examples/settings.json +++ b/noxis-rs/tests/examples/settings.json @@ -1,6 +1,5 @@ {
"dateOfCreation": "1721381809103", - "configServer" : "localhost", "processes": [ { "name": "temp-process", @@ -12,7 +11,8 @@ "src": "/home/vladislav/web/runner-rs/examples/", "triggers": { "onDelete": "hold", - "onChange": "stop" + "onChange": "stop", + "doRestore" : true } } ],