migrate #42

Merged
VladislavD merged 68 commits from migrate into rc 2025-06-23 15:35:11 +03:00
41 changed files with 3296 additions and 2372 deletions

40
.github/workflows/rust_riscv_build.yml vendored Normal file
View File

@ -0,0 +1,40 @@
# CI: cross-build the workspace for riscv64gc on every push/PR to master or rc.
name: Rust riscv64 build

on:
  push:
    branches: [ master , rc ]
  pull_request:
    branches: [ master , rc ]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      # riscv64 cross toolchain + linker binutils are required by the cargo alias below.
      - name: Install dependencies
        run: sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config build-essential gcc-riscv64-unknown-elf gcc-riscv64-linux-gnu binutils-riscv64-linux-gnu

      - name: Set up Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      - name: Set up riscv64gc toolchain
        run: rustup target add riscv64gc-unknown-linux-gnu

      # Fixed mislabeled step: it was named "Build x86_64-unknown-linux-gnu"
      # although it runs the riscv64 build alias.
      - name: Build riscv64gc-unknown-linux-gnu
        run: cargo riscv64 --verbose

37
.github/workflows/rust_test.yml vendored Normal file
View File

@ -0,0 +1,37 @@
# CI: run the workspace test suite on every push/PR to master or rc.
name: Rust test

on:
  push:
    branches: [ master , rc ]
  pull_request:
    branches: [ master , rc ]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install dependencies
        run: sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config build-essential

      - name: Set up Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      - name: Run tests
        run: cargo test --verbose

37
.github/workflows/rust_x86_build.yml vendored Normal file
View File

@ -0,0 +1,37 @@
# CI: build the workspace for x86_64-linux on every push/PR to master or rc.
name: Rust x86_64 build

on:
  push:
    branches: [ master , rc ]
  pull_request:
    branches: [ master , rc ]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install dependencies
        run: sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config build-essential

      - name: Set up Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      # `cargo x86_64` is a workspace alias producing a release build for
      # the x86_64-unknown-linux-gnu target (see README build section).
      - name: Build x86_64-unknown-linux-gnu
        run: cargo x86_64 --verbose

6
.gitignore vendored
View File

@ -1,7 +1,9 @@
/target /target
.idea .idea
/.env .env
Cargo.lock Cargo.lock
hagent_test.sock hagent_test.sock
release release
*.sock *.sock
*.bak
docker-compose.yml

View File

@ -2,7 +2,7 @@
resolver = "2" resolver = "2"
members = [ members = [
"noxis-rs", "noxis-rs",
"noxis-cli", "noxis-cli", "noxis-proxy",
] ]
[profile.dev] [profile.dev]

View File

@ -1,10 +1,20 @@
# noxis-rs # noxis
![Logo](logo.png) ![Logo](logo.png)
### In-container integrating util to handle processes runtime
( with amd64 and riscv64 support )
## Depends on `noxis` - monitoring util with special attention on
1) **Speed**
2) **Multiplatform** execution *(with `amd64` and `riscv64` **support**)*
3) **Smallness** and **Optimization**
Its **main tasks** are
- to manage the processes that occur inside the container or in the target system.
- collect data (metrics);
- monitor the availability of system files necessary for the operation of processes;
- check whether there is a connection between processes and services, where the information comes from or where it is sent.
## Build requirements
- `rustup (>=1.27.1)` - `rustup (>=1.27.1)`
- `gcc-riscv64-unknown-elf` - `gcc-riscv64-unknown-elf`
- `build-essential` - `build-essential`
@ -12,60 +22,70 @@
- `binutils-riscv64-linux-gnu` - `binutils-riscv64-linux-gnu`
## Setting up ## Key items in repo
Download and execute rustup.sh
~~~bash 1) Main daemon `noxis-rs`
2) CLI `noxis-cli`
3) Unix-Socket to Web-Socket **Proxy** for integrations `noxis-proxy`
## Setting up device
Download and execute rustup.sh *(for building)*
~~~ bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
~~~ ~~~
## Building ## Building
1. Clone this repo `runner-rs` 1. Clone `noxis`
~~~bash ~~~ bash
git clone https://github.com/prplV/runner-rs git clone https://github.com/prplV/noxis
~~~ ~~~
2. Enter project's dir and set up toolchain list to compile code for RISC-V and AMD64 2. Enter project's dir and set up toolchain list to compile code for RISC-V or x86_64
~~~bash ~~~ bash
cd runner-rs/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu cd noxis/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu
~~~ ~~~
> [!NOTE] > [!NOTE]
> Cargo is configured to build an app for amd64/linux defaultly. RISCV-based compilation is optional. > Cargo is configured to build an apputil for x86_64/linux defaultly. RISCV-based compilation is optional.
3.1. Release build of app for amd64/linux 3.1. Release build of util for x86_64/linux
~~~bash ~~~bash
cargo x86_64 cargo x86_64
~~~ ~~~
3.2. Release build of app for riscv64/linux 3.2. Release build of util for riscv64/linux
~~~bash ~~~bash
cargo riscv64 cargo riscv64
~~~ ~~~
3.3. Release build of app for both (riscv64 and amd64) 3.3. Release build of util for both (riscv64 and x86_64)
~~~bash ~~~bash
cargo unibuild cargo unibuild
~~~ ~~~
## Execution for amd64/linux ## Execution **DAEMON** for x86_64/linux
1) If you work on x86_64/linux machine execute:
~~~bash ~~~bash
./target/x86_64-unknown-linux-gnu/release/runner-rs ./target/x86_64-unknown-linux-gnu/release/noxis-rs
~~~ ~~~
or or
~~~bash ~~~bash
cargo run_x86 cargo run_x86
~~~ ~~~
2) If you work on riscv64/linux machine execute:
## Execution for riscv64/linux
~~~bash ~~~bash
./target/riscv64gc-unknown-linux-gnu/release/runner-rs ./target/riscv64gc-unknown-linux-gnu/release/noxis-rs
~~~ ~~~
or or
> [!CAUTION] > [!CAUTION]

BIN
logo.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 152 KiB

After

Width:  |  Height:  |  Size: 221 KiB

1
noxis-cli/.env.example Normal file
View File

@ -0,0 +1 @@
NOXIS_SOCKET_PATH = "/path/to/noxis.sock"

View File

@ -6,6 +6,7 @@ edition = "2021"
[dependencies] [dependencies]
anyhow = "1.0.94" anyhow = "1.0.94"
clap = { version = "4.5.22", features = ["derive"] } clap = { version = "4.5.22", features = ["derive"] }
dotenv = "0.15.0"
serde = { version = "1.0.215", features = ["derive"] } serde = { version = "1.0.215", features = ["derive"] }
serde_json = "1.0.133" serde_json = "1.0.133"
thiserror = "2.0.11" thiserror = "2.0.11"

View File

@ -1,52 +1,43 @@
use clap::{Parser, Subcommand}; use clap::{Parser, Subcommand};
use metrics_models::MetricsMode;
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct Cli { pub struct Cli {
#[arg( #[arg(
short, short,
default_value="noxis-rs.sock", default_value = "noxis-rs.sock",
help="explicit specify of NOXIS Socket file" help = "explicit specify of NOXIS Socket file"
)] )]
pub socket : String, pub socket: String,
#[command( #[command(subcommand, help = "to manage Noxis work")]
subcommand, pub command: Commands,
help = "to manage Noxis work",
)]
pub command : Commands,
} }
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
pub enum Commands { pub enum Commands {
#[command( #[command(about = "To get info about current Noxis status")]
about = "To get info about current Noxis status", Status,
)] #[command(about = "To start Noxis process")]
Status,
#[command(
about = "To start Noxis process",
)]
Start(StartAction), Start(StartAction),
#[command( #[command(about = "To stop Noxis process")]
about = "To stop Noxis process",
)]
Stop, Stop,
#[command( #[command(about = "To restart Noxis process")]
about = "To restart Noxis process",
)]
Restart(StartAction), Restart(StartAction),
#[command( #[command(about = "To get list of processes that are being monitoring")]
about = "To get list of processes that are being monitoring",
)]
Processes, Processes,
// process command // process command
#[command( #[command(about = "To manage current process that is being monitoring")]
about = "To manage current process that is being monitoring",
)]
Process(ProcessCommand), Process(ProcessCommand),
// config command = #[command(about = "To manage config settings")]
#[command(
about = "To manage config settings",
)]
Config(ConfigCommand), Config(ConfigCommand),
#[command(about = "To inspect system metrics in restricted mode")]
Inspect(MetricsCommand),
}
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct MetricsCommand {
#[command(subcommand)]
pub mode: MetricsMode,
} }
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
@ -56,96 +47,100 @@ pub struct StartAction {
num_args = 1.., num_args = 1..,
value_delimiter = ' ' value_delimiter = ' '
)] )]
pub flags : Vec<String>, pub flags: Vec<String>,
} }
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct ConfigCommand { pub struct ConfigCommand {
#[command(subcommand)] #[command(subcommand)]
pub action : ConfigAction, pub action: ConfigAction,
} }
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
pub enum ConfigAction { pub enum ConfigAction {
#[command( #[command(about = "To change current Noxis configuration")]
about = "To change current Noxis configuration",
)]
Local(LocalConfig), Local(LocalConfig),
#[command( #[command(about = "To change credentials of the remote config server")]
about = "To change credentials of the remote config server",
)]
Remote, Remote,
#[command( #[command(about = "To reset all config settings")]
about = "To reset all config settings",
)]
Reset, Reset,
#[command(about = "To get current Noxis configuration", name = "ls")]
Show(EnvConfig),
}
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct EnvConfig {
// flag
#[arg(long = "env", action, help = "to read environment vars configuration")]
pub is_env: bool,
} }
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct LocalConfig { pub struct LocalConfig {
// flag // flag
#[arg( #[arg(long = "json", action, help = "to read following input as JSON")]
long = "json", pub is_json: bool,
action,
help = "to read following input as JSON",
)]
pub is_json : bool,
// value // value
#[arg( #[arg(help = "path to config file or config String (with --json flag)")]
help = "path to config file or config String (with --json flag)", pub config: String,
)]
pub config : String,
} }
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)] #[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct ProcessCommand { pub struct ProcessCommand {
#[arg( #[arg(help = "name of needed process")]
help = "name of needed process", pub process: String,
)] #[command(subcommand, help = "To get current process's status")]
pub process : String, pub action: ProcessAction,
#[command(
subcommand,
help = "To get current process's status",
)]
pub action : ProcessAction,
} }
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)] #[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
pub enum ProcessAction { pub enum ProcessAction {
#[command( #[command(about = "To get info about current process status")]
about = "To get info about current process status", Status,
)] #[command(about = "To start current process")]
Status, Start,
#[command( #[command(about = "To stop current process")]
about = "To start current process", Stop,
)] #[command(about = "To freeze (hybernaze) current process")]
Start, Freeze,
#[command( #[command(about = "To unfreeze (unhybernaze) current process")]
about = "To stop current process", Unfreeze,
)] #[command(about = "To restart current process")]
Stop, Restart,
#[command( #[command(about = "To get info about current process's dependencies")]
about = "To freeze (hybernaze) current process", Deps,
)] #[command(about = "To get info about current process's files-dependencies")]
Freeze, Files,
#[command( #[command(about = "To get info about current process's services-dependencies")]
about = "To unfreeze (unhybernaze) current process", Services,
)] }
Unfreeze,
#[command( pub mod metrics_models {
about = "To restart current process", #[derive(Debug, clap::Parser, serde::Serialize, serde::Deserialize)]
)] pub enum MetricsMode {
Restart, #[command(about = "To capture all metrics about undercontrolled system")]
#[command( Full,
about = "To get info about current process's dependencies", // system
)] #[command(about = "To capture general host info")]
Deps, Host,
#[command( #[command(about = "To capture detailed CPU metrics")]
about = "To get info about current process's files-dependencies", Cpu,
)] #[command(about = "To capture RAM metrics")]
Files, Ram,
#[command( #[command(about = "To capture disk environment metrics")]
about = "To get info about current process's services-dependencies", Rom,
)] #[command(about = "To capture system net interfaces metrics")]
Services, Network,
} // processes
#[command(about = "To capture monitoring processes metrics")]
Processes, // Config
}
}
impl Cli {
pub fn validate_socket(mut self) -> Self {
if let Ok(path) = std::env::var("NOXIS_SOCKET_PATH") {
self.socket = path;
}
self
}
}

View File

@ -1,8 +1,9 @@
use thiserror::Error; use thiserror::Error;
#[derive(Debug, Error)] #[derive(Debug, Error)]
#[allow(dead_code)]
pub enum NoxisCliError { pub enum NoxisCliError {
#[error("Can't find socket `{0}`. Error : {1}")] #[error("Can't find socket `{0}`. {1}")]
NoxisDaemonMissing(String, String), NoxisDaemonMissing(String, String),
#[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's runtime!")] #[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's runtime!")]
PortIsNotWritable, PortIsNotWritable,
@ -11,5 +12,5 @@ pub enum NoxisCliError {
#[error("Can't parse CLI struct and send as byte stream")] #[error("Can't parse CLI struct and send as byte stream")]
ToStringCliParsingParsing, ToStringCliParsingParsing,
#[error("Can't read Noxis response due to {0}")] #[error("Can't read Noxis response due to {0}")]
CliResponseReadError(String) CliResponseReadError(String),
} }

View File

@ -1,30 +1,37 @@
use tokio::net::UnixStream;
use tokio::io::{AsyncWriteExt, AsyncReadExt};
use tokio::time::{Duration, sleep};
use anyhow::Result;
use super::Cli;
use super::cli_error::NoxisCliError; use super::cli_error::NoxisCliError;
use super::Cli;
use anyhow::Result;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::UnixStream;
#[allow(dead_code)]
async fn create_us_stream(cli: &Cli) -> Result<UnixStream> { async fn create_us_stream(cli: &Cli) -> Result<UnixStream> {
Ok(UnixStream::connect(&cli.socket).await.map_err(|er| NoxisCliError::NoxisDaemonMissing((&cli.socket).to_string(), er.to_string()))?) Ok(UnixStream::connect(&cli.socket).await.map_err(|er| {
NoxisCliError::NoxisDaemonMissing((&cli.socket).to_string(), er.to_string())
})?)
} }
#[allow(dead_code)]
pub async fn try_send(cli: Cli) -> Result<()> { pub async fn try_send(cli: Cli) -> Result<()> {
// let stream = create_us_stream(&cli).await;
let mut stream = create_us_stream(&cli).await?; let mut stream = create_us_stream(&cli).await?;
let msg = serde_json::to_vec(&cli) let msg = serde_json::to_vec(&cli).map_err(|_| NoxisCliError::ToStringCliParsingParsing)?;
.map_err(|_| NoxisCliError::ToStringCliParsingParsing)?;
stream.write_all(&msg) stream
.write_all(&msg)
.await .await
.map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?; .map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?;
let mut response = [0; 1024]; let mut response = Vec::new();
stream.read(&mut response) stream
.read_to_end(&mut response)
.await .await
.map_err(|er| NoxisCliError::CliResponseReadError(er.to_string()))?; .map_err(|er| NoxisCliError::CliResponseReadError(er.to_string()))?;
println!("Received response: {}", String::from_utf8_lossy(&response)); let response = String::from_utf8_lossy(&response);
for line in response.lines() {
println!("{}", line);
}
Ok(()) Ok(())
} }

View File

@ -1,5 +1,5 @@
mod cli; mod cli;
mod cli_net;
mod cli_error; mod cli_error;
mod cli_net;
pub use cli::*; pub use cli::*;

View File

@ -1,15 +1,16 @@
mod cli; mod cli;
mod cli_net;
mod cli_error; mod cli_error;
mod cli_net;
use anyhow::Result;
use clap::Parser; use clap::Parser;
use cli::Cli; use cli::Cli;
use cli_net::try_send; use cli_net::try_send;
use anyhow::Result;
#[tokio::main] #[tokio::main]
async fn main() -> Result<()>{ async fn main() -> Result<()> {
let cli = Cli::parse(); dotenv::dotenv().ok();
let cli = Cli::parse().validate_socket();
try_send(cli).await?; try_send(cli).await?;
Ok(()) Ok(())
} }

View File

@ -0,0 +1,4 @@
.env
.env.example
README.md
target

2
noxis-proxy/.env.example Normal file
View File

@ -0,0 +1,2 @@
NOXIS_SOCKET_PATH = "/path/to/noxis.sock"
NOXIS_PROXY_PORT = "numport"

12
noxis-proxy/Cargo.toml Normal file
View File

@ -0,0 +1,12 @@
# Crate manifest for the noxis-proxy: Unix-socket to WebSocket bridge.
[package]
name = "noxis-proxy"
version = "0.1.0"
edition = "2024"

[dependencies]
# Application-level error wrapping (anyhow::Result in main).
anyhow = "1.0.98"
# HTTP server; the "ws" feature enables the WebSocket upgrade extractor.
axum = { version = "0.8.4", features = ["ws"] }
# Loads NOXIS_* variables from a local .env file at startup.
dotenv = "0.15.0"
# Async runtime; "full" brings in net (UnixStream/TcpListener), io, macros.
tokio = { version = "1.45.1", features = ["full"] }
# Structured logging used throughout main.rs.
tracing = "0.1.41"
tracing-subscriber = "0.3.19"

19
noxis-proxy/Dockerfile Normal file
View File

@ -0,0 +1,19 @@
# Stage 1: build a fully static musl binary so it can run on bare Alpine.
FROM rust:latest AS builder
WORKDIR /app
RUN apt update && apt install -y musl-tools
RUN rustup target add x86_64-unknown-linux-musl
COPY . .
RUN cargo build --release --target=x86_64-unknown-linux-musl

# Stage 2: minimal runtime image containing only the proxy binary.
FROM alpine:latest
WORKDIR /app
COPY --from=builder /app/target/x86_64-unknown-linux-musl/release/noxis-proxy /app/noxis-proxy
RUN apk add --no-cache ca-certificates
# Default proxy port; overridable at runtime via NOXIS_PROXY_PORT.
EXPOSE 7654
ENTRYPOINT ["/app/noxis-proxy"]

View File

@ -0,0 +1,20 @@
# Compose service for the noxis-proxy container.
# Indentation restored — the flattened form is not valid YAML.
services:
  noxis-proxy:
    container_name: noxis-proxy
    image: noxis-proxy:0.1.0
    networks:
      - noxis-net
    environment:
      # Resolves to /app/noxis.sock (WORKDIR /app) — matches the volume mount below.
      - NOXIS_SOCKET_PATH=./noxis.sock
      - NOXIS_PROXY_PORT=7654
      - NOXIS_LOG_LEVEL=TRACE
    volumes:
      # Host-side daemon socket bind-mounted into the container.
      - /home/user/diplom_code/noxis-rs/noxis.sock:/app/noxis.sock
    ports:
      - 7654:7654
    restart: always

networks:
  noxis-net:
    driver: bridge

96
noxis-proxy/src/main.rs Normal file
View File

@ -0,0 +1,96 @@
use axum::{
extract::{
ws::{Message, WebSocket, WebSocketUpgrade},
State,
},
response::IntoResponse,
routing::get,
Router,
};
use std::{
path::PathBuf, str::FromStr,
};
use tokio::net::UnixStream;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// Shared Axum application state: the filesystem path of the Noxis
/// daemon's Unix socket, read from `NOXIS_SOCKET_PATH` in `main`.
#[derive(Clone)]
struct AppState {
socket_path: PathBuf,
}
/// Proxy entry point.
///
/// Configuration comes from the environment (optionally via a `.env` file):
/// * `NOXIS_LOG_LEVEL`   — tracing max level, falls back to `INFO`
/// * `NOXIS_SOCKET_PATH` — Noxis daemon Unix socket, falls back to `./noxis.sock`
/// * `NOXIS_PROXY_PORT`  — TCP port to serve on, falls back to `7654`
///
/// Routes: `/ws` (WebSocket <-> Unix-socket bridge) and `/hello` (liveness probe).
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Load .env if present; a missing file is not an error.
    dotenv::dotenv().ok();

    // NOTE(review): an unparsable NOXIS_LOG_LEVEL silently falls back to INFO,
    // same as when the variable is unset.
    let log_level = std::env::var("NOXIS_LOG_LEVEL")
        .ok()
        .and_then(|lvl| tracing::Level::from_str(&lvl).ok())
        .unwrap_or(tracing::Level::INFO);
    tracing_subscriber::fmt()
        .with_max_level(log_level)
        .with_writer(std::io::stdout)
        .compact()
        .init();

    let socket_path =
        std::env::var("NOXIS_SOCKET_PATH").unwrap_or_else(|_| String::from("./noxis.sock"));
    let app_state = AppState {
        socket_path: PathBuf::from(socket_path),
    };

    let app = Router::new()
        .route("/ws", get(ws_handler))
        .route("/hello", get(hello))
        .with_state(app_state);

    let port = std::env::var("NOXIS_PROXY_PORT").unwrap_or_else(|_| String::from("7654"));
    let bind = format!("0.0.0.0:{}", port);
    tracing::info!("Serving on {}", &bind);

    let listener = tokio::net::TcpListener::bind(bind).await?;
    axum::serve(listener, app).await?;
    Ok(())
}
/// HTTP handler for `/ws`: upgrades the request to a WebSocket session
/// and hands the resulting connection to [`handle_socket`].
async fn ws_handler(ws: WebSocketUpgrade, State(state): State<AppState>) -> impl IntoResponse {
    tracing::info!("New WebSocket connection");
    ws.on_upgrade(move |ws_conn| handle_socket(ws_conn, state))
}
/// Liveness probe for `/hello`: always responds with the plain text `HELLO`.
async fn hello(State(_state): State<AppState>) -> impl IntoResponse {
    "HELLO".to_string()
}
async fn handle_socket(mut ws: WebSocket, state: AppState) {
tracing::info!("handle websocket");
let ws_receiver = tokio::spawn(async move {
while let Some(Ok(msg)) = ws.recv().await {
let mut unix_socket = match UnixStream::connect(&state.socket_path).await {
Ok(socket) => socket,
Err(e) => {
tracing::error!("Failed to connect to Unix socket: {}", e);
let _ = ws.send(Message::Text("ERROR: Unix socket connection failed".into())).await;
return;
}
};
if let Message::Text(text) = msg {
if let Err(e) = unix_socket.write_all(text.as_bytes()).await {
tracing::error!("Failed to write to Unix socket: {}", e);
break;
}
let mut buf = Vec::new();
match unix_socket.read_to_end(&mut buf).await {
Ok(n) if n > 0 => {
let response = String::from_utf8_lossy(&buf[..n]);
if ws.send(Message::Text(response.into_owned().into())).await.is_err() {
break;
}
}
Ok(_) | Err(_) => break,
}
}
}
});
let _ = ws_receiver.await;
}

16
noxis-rs/.env.example Normal file
View File

@ -0,0 +1,16 @@
# ACTIONS (trigger on exist)
NOXIS_NO_HAGENT = "true"
# NOXIS_NO_LOGS = "true"
NOXIS_REFRESH_LOGS = "true"
NOXIS_NO_SUB = "true"
# VALUES
NOXIS_HAGENT_SOCKET_PATH = "/var/run/example/hostagent.sock"
NOXIS_LOG_TO = "/var/log/noxis/noxis.log"
NOXIS_REMOTE_SERVER_URL = "ip.ip.ip.ip:port"
NOXIS_CONFIG_PATH = "./settings.json"
NOXIS_METRICS_MODE = "full"
NOXIS_SOCKET_PATH = "/path/to/noxis.sock"
NOXIS_BACKUP_FOLDER = "/path/to/backups/folder"
NOXIS_MAX_LOG_LEVEL = "TRACE"

View File

@ -1,6 +1,6 @@
[package] [package]
name = "noxis-rs" name = "noxis-rs"
version = "0.11.26" version = "0.12.0"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
@ -20,3 +20,6 @@ noxis-cli = { path = "../noxis-cli" }
dotenv = "0.15.0" dotenv = "0.15.0"
futures = "0.3.31" futures = "0.3.31"
async-trait = "0.1.88" async-trait = "0.1.88"
crossbeam = { version = "0.8.4", features = ["crossbeam-channel"] }
lazy_static = "1.5.0"
ulid = "1.2.1"

View File

@ -1,6 +1,5 @@
{ {
"dateOfCreation": "1721381809112", "dateOfCreation": "1721381809112",
"configServer": "192.168.2.37",
"processes": [ "processes": [
{ {
"name": "temp-process", "name": "temp-process",
@ -12,7 +11,35 @@
"src": "./tests/examples/", "src": "./tests/examples/",
"triggers": { "triggers": {
"onDelete": "stop", "onDelete": "stop",
"onChange": "restart" "onChange": "restart",
"doRestore" : true
}
},
{
"filename": "none.json",
"src": "./tests/examples/",
"triggers": {
"onDelete": "stop",
"onChange": "restart",
"doRestore" : false
}
},
{
"filename": "invalid_config.json",
"src": "./tests/examples/",
"triggers": {
"onDelete": "stop",
"onChange": "restart",
"doRestore" : false
}
},
{
"filename": "save-conf.json",
"src": "./tests/examples/",
"triggers": {
"onDelete": "stop",
"onChange": "restart",
"doRestore" : true
} }
} }
], ],
@ -21,12 +48,20 @@
"hostname": "ya.ru", "hostname": "ya.ru",
"port": 443, "port": 443,
"triggers": { "triggers": {
"wait": 10, "wait": 2,
"onLost": "restart" "onLost": "stop"
}
},
{
"hostname": "8.8.8.8",
"port": 443,
"triggers": {
"wait": 2,
"onLost": "stop"
} }
} }
] ]
} }
} }
] ]
} }

View File

@ -1,53 +1,103 @@
mod options; mod options;
mod utils; mod utils;
use clap::Parser;
use log::{error, info}; use log::{error, info};
use options::config::*;
use options::logger::setup_logger;
use options::signals::set_valid_destructor;
use options::structs::Processes;
use options::cli_pipeline::init_cli_pipeline; use options::cli_pipeline::init_cli_pipeline;
use options::config::v2::init_config_mechanism;
use options::logger::setup_logger;
use options::preboot::PrebootParams;
use options::signals::set_valid_destructor;
use options::structs::ProcessUnit;
use options::structs::{bus::BusMessage, Processes};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::sync::mpsc; use tokio::sync::{broadcast, mpsc, oneshot};
use utils::*; use utils::bus::Bus;
use options::preboot::PrebootParams; use utils::metrics::init_metrics_grubber;
use tokio::sync::{broadcast, oneshot};
use options::config::v2::init_config_mechanism;
use utils::v2::init_monitoring; use utils::v2::init_monitoring;
#[tokio::main(flavor = "multi_thread", worker_threads = 4)] #[tokio::main(flavor = "multi_thread", worker_threads = 4)]
async fn main() -> anyhow::Result<()>{ async fn main() -> anyhow::Result<()> {
let preboot = Arc::new(PrebootParams::parse().validate()?); let preboot = Arc::new(PrebootParams::validate());
let _ = setup_logger(); let _ = setup_logger();
info!("Noxis is configurating..."); info!("Noxis is configurating...");
// //
let (tx_brd, mut rx_brd) = broadcast::channel::<Processes>(1); let (tx_brd, mut rx_brd) = broadcast::channel::<Processes>(1);
// for cli to get config
let mut rx_cli_brd = tx_brd.subscribe();
// cli <-> config // cli <-> config
let (tx_oneshot, rx_oneshot) = oneshot::channel::<Processes>(); let (tx_oneshot, rx_oneshot) = oneshot::channel::<Processes>();
let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![]; let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![];
// initilaizing task for config manipulations // to BUS channel
let (tx_to_bus, rx_to_bus) = mpsc::channel::<BusMessage>(5);
// from BUS channels
let (tx_to_cli, rx_to_cli) = mpsc::channel::<BusMessage>(5);
let (tx_to_supervisor, rx_to_supervisor) = mpsc::channel::<BusMessage>(5);
let (tx_to_metrics, rx_to_metrics) = mpsc::channel::<BusMessage>(5);
let tx_to_bus = Arc::new(tx_to_bus);
let tx_to_cli = Arc::new(tx_to_cli);
let tx_to_supervisor = Arc::new(tx_to_supervisor);
let tx_to_metrics = Arc::new(tx_to_metrics);
let bus_module = tokio::spawn(async move {
let mut bus = Bus::new(
rx_to_bus,
tx_to_cli.clone(),
tx_to_supervisor.clone(),
tx_to_metrics.clone(),
);
bus.process().await;
error!("Info Bus crushed !");
});
handler.push(bus_module);
// initilaizing task for config manipulations
let preboot_config = preboot.clone();
let config_module = tokio::spawn(async move { let config_module = tokio::spawn(async move {
let _ = init_config_mechanism( let _ = init_config_mechanism(rx_oneshot, tx_brd, preboot_config).await;
rx_oneshot,
tx_brd,
preboot.clone()
).await;
}); });
handler.push(config_module); handler.push(config_module);
// initilaizing task for cli manipulation // initilaizing task for cli manipulation
let tx_bus = tx_to_bus.clone();
let preboot_cli = preboot.clone();
let cli_module = tokio::spawn(async move { let cli_module = tokio::spawn(async move {
if let Err(er) = init_cli_pipeline().await { let config = {
let mut tick = tokio::time::interval(Duration::from_millis(500));
loop {
tick.tick().await;
break match rx_cli_brd.try_recv() {
Ok(conf) => conf,
Err(_) => continue,
};
}
};
if let Err(er) = init_cli_pipeline(
preboot_cli,
Arc::new(config),
tx_oneshot,
rx_to_cli,
tx_bus.clone(),
)
.await
{
error!("CLI pipeline failed due to {}", er) error!("CLI pipeline failed due to {}", er)
} }
}); });
handler.push(cli_module); handler.push(cli_module);
// metrics
let tx_bus = tx_to_bus.clone();
let metrics_module = tokio::spawn(async move {
if let Err(er) = init_metrics_grubber(tx_bus.clone(), rx_to_metrics).await {
error!("Metrics module crushed : {}", er);
}
});
handler.push(metrics_module);
// initilaizing task for deinitializing `Noxis` // initilaizing task for deinitializing `Noxis`
let ctrlc = tokio::spawn(async move { let ctrlc = tokio::spawn(async move {
if let Err(er) = set_valid_destructor(vec![].into()).await { if let Err(er) = set_valid_destructor(vec![].into()).await {
@ -57,6 +107,8 @@ async fn main() -> anyhow::Result<()>{
}); });
handler.push(ctrlc); handler.push(ctrlc);
let tx_bus = tx_to_bus.clone();
let preboot_cli = preboot.clone();
let monitoring = tokio::spawn(async move { let monitoring = tokio::spawn(async move {
let config = { let config = {
let mut tick = tokio::time::interval(Duration::from_millis(500)); let mut tick = tokio::time::interval(Duration::from_millis(500));
@ -65,10 +117,10 @@ async fn main() -> anyhow::Result<()>{
break match rx_brd.try_recv() { break match rx_brd.try_recv() {
Ok(conf) => conf, Ok(conf) => conf,
Err(_) => continue, Err(_) => continue,
} };
} }
}; };
if let Err(er) = init_monitoring(config).await { if let Err(er) = init_monitoring(config, preboot_cli, rx_to_supervisor, tx_bus).await {
error!("Monitoring mod failed due to {}", er); error!("Monitoring mod failed due to {}", er);
} }
}); });
@ -77,80 +129,5 @@ async fn main() -> anyhow::Result<()>{
for i in handler { for i in handler {
let _ = i.await; let _ = i.await;
} }
// setting up redis connection \
// then conf checks to choose the most actual \
// let processes: Processes = get_actual_config(preboot.clone()).await.unwrap_or_else(|| {
// error!("No actual configuration for runner. Stopping...");
// std::process::exit(1);
// });
//
// info!(
// "Current runner configuration: {}",
// &processes.date_of_creation
// );
// info!("Runner is ready. Initializing...");
//
// if processes.processes.is_empty() {
// error!("Processes list is null, runner-rs initialization is stopped");
// return Err(Error::msg("Empty processes segment in config"));
// }
// let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![];
// // is in need to send to the signals handler thread
// let mut senders: Vec<Arc<mpsc::Sender<u8>>> = vec![];
//
// for proc in processes.processes.iter() {
// info!(
// "Process '{}' on stage: {}. Depends on {} file(s), {} service(s)",
// proc.name,
// proc.path,
// proc.dependencies.files.len(),
// proc.dependencies.services.len()
// );
//
// // creating msg channel
// // can or should be executed in new thread
// let (tx, mut rx) = mpsc::channel::<u8>(1);
// let proc = Arc::new(proc.clone());
// let tx = Arc::new(tx.clone());
//
// senders.push(Arc::clone(&tx.clone()));
//
// let event = tokio::spawn(async move {
// run_daemons(proc.clone(), tx.clone(), &mut rx).await;
// });
// handler.push(event);
// }
//
// // destructor addition
// handler.push(tokio::spawn(async move {
// if set_valid_destructor(Arc::new(senders)).await.is_err() {
// error!("Linux signals handler creation failed. Terminating main thread...");
// return;
// }
//
// tokio::time::sleep(Duration::from_millis(200)).await;
// info!("End of job. Terminating main thread...");
// std::process::exit(0);
// }));
//
// // remote config update subscription
// handler.push(tokio::spawn(async move {
// let _ = subscribe_config_stream(Arc::new(processes), preboot.clone()).await;
// }));
//
// // cli pipeline
// handler.push(tokio::spawn(async move {
// let _ = init_cli_pipeline().await;
// }));
//
// for i in handler {
// let _ = i.await;
// }
Ok(()) Ok(())
} }
// todo: integration tests
// todo: config pulling mechanism rework (socket)
// todo: tasks management after killing all processes
// todo:

View File

@ -1,8 +1,8 @@
// ! gathering optional items module // ! gathering optional items module
pub mod cli_pipeline;
pub mod config; pub mod config;
pub mod logger; pub mod logger;
pub mod preboot;
pub mod signals; pub mod signals;
pub mod structs; pub mod structs;
pub mod preboot;
pub mod cli_pipeline;

View File

@ -1,88 +1,350 @@
use super::structs::bus::BusMessage;
use super::structs::Processes;
use crate::options::structs::bus::InternalCli;
use log::{error, info}; use log::{error, info};
use tokio::net::{ UnixStream, UnixListener }; use noxis_cli::{Cli, ProcessAction};
use tokio::time::{sleep, Duration}; use std::any::Any;
use std::fs; use std::fs;
use tokio::io::{ AsyncWriteExt, AsyncReadExt}; use std::sync::Arc;
use noxis_cli::Cli; use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{UnixListener, UnixStream};
use tokio::sync::{Mutex, OnceCell};
use tokio::time::{sleep, Duration};
use super::preboot::PrebootParams;
type ConfigGateway = tokio::sync::oneshot::Sender<Processes>;
type ProcessedConfigGateway = Arc<Mutex<OnceCell<ConfigGateway>>>;
type BusReciever = tokio::sync::mpsc::Receiver<BusMessage>;
type BusSender = Arc<tokio::sync::mpsc::Sender<BusMessage>>;
type ReadyBusReciever = Arc<Mutex<tokio::sync::mpsc::Receiver<BusMessage>>>;
/// # Fn `init_cli_pipeline` /// # Fn `init_cli_pipeline`
/// ## for catching all input requests from CLI /// ## for catching all input requests from CLI
/// ///
/// *input* : - /// *input* : -
/// ///
/// *output* : `anyhow::Result<()>` to wrap errors /// *output* : `anyhow::Result<()>` to wrap errors
/// ///
/// *initiator* : fn `main` /// *initiator* : fn `main`
/// ///
/// *managing* : `TcpListener` object to handle requests /// *managing* : `TcpListener` object to handle requests
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub async fn init_cli_pipeline() -> anyhow::Result<()> { pub async fn init_cli_pipeline(
let socket_path = "noxis.sock"; params: Arc<PrebootParams>,
config: Arc<Processes>,
config_gateway: ConfigGateway,
bus_reciever: BusReciever,
bus_sender: BusSender,
) -> anyhow::Result<()> {
let socket_path = &params.self_socket;
let _ = fs::remove_file(socket_path); let _ = fs::remove_file(socket_path);
let config_gateway = Arc::new(Mutex::new(OnceCell::new_with(Some(config_gateway))));
let bus_reciever = Arc::new(Mutex::new(bus_reciever));
match UnixListener::bind(socket_path) { match UnixListener::bind(socket_path) {
Ok(list) => { Ok(list) => {
// TODO: remove `unwrap`s // TODO: remove `unwrap`s
info!("Listening on {}", socket_path); info!("Listening on {}", socket_path.display());
std::env::set_var("NOXIS_SOCKET_PATH", socket_path);
loop { loop {
match list.accept().await { match list.accept().await {
Ok((socket, _)) => { Ok((socket, _)) => {
// tokio::spawn(); // ??? maybe errors on async work with data transfering between modules
process_connection(socket).await; let params = params.clone();
}, let config = config.clone();
let config_gateway = config_gateway.clone();
let bus_reciever = bus_reciever.clone();
let bus_sender = bus_sender.clone();
tokio::spawn(async move {
process_connection(
socket,
params.clone(),
config.clone(),
config_gateway.clone(),
bus_reciever,
bus_sender,
)
.await;
});
}
Err(er) => { Err(er) => {
error!("Cannot poll connection to CLI due to {}", er); error!("Cannot poll connection to CLI due to {}", er);
sleep(Duration::from_millis(300)).await; sleep(Duration::from_millis(300)).await;
}, }
} }
} }
// Ok(()) // Ok(())
}, }
Err(er) => { Err(er) => {
error!("Failed to open UnixListener for CLI"); error!("Failed to open UnixListener for CLI");
Err(er.into()) Err(er.into())
}, }
} }
} }
/// # Fn `process_connection` /// # Fn `process_connection`
/// ## for processing input CLI requests /// ## for processing input CLI requests
/// ///
/// *input* : mut stream: `TcpStream` /// *input* : mut stream: `TcpStream`
/// ///
/// *output* : - /// *output* : -
/// ///
/// *initiator* : fn `init_cli_pipeline` /// *initiator* : fn `init_cli_pipeline`
/// ///
/// *managing* : mutable object of `TcpStream` /// *managing* : mutable object of `TcpStream`
/// ///
/// *depends on* : `tokio::net::TcpStream` /// *depends on* : `tokio::net::TcpStream`
/// ///
async fn process_connection(mut stream: UnixStream) { async fn process_connection(
mut stream: UnixStream,
params: Arc<PrebootParams>,
config: Arc<Processes>,
cfg_gateway: ProcessedConfigGateway,
bus_reciever: ReadyBusReciever,
bus_sender: BusSender,
) {
let mut buf = vec![0; 1024]; let mut buf = vec![0; 1024];
match stream.read(&mut buf).await { match stream.read(&mut buf).await {
Ok(0) => { Ok(0) => {
info!("Client disconnected "); info!("Client disconnected ");
}, }
Ok(n) => { Ok(n) => {
buf.truncate(n); buf.truncate(n);
info!("CLI have sent {} bytes", n); info!("CLI have sent {} bytes", n);
match serde_json::from_slice::<Cli>(&buf) { match serde_json::from_slice::<Cli>(&buf) {
Ok(cli) => { Ok(cli) => {
info!("Received CLI request: {:?}", cli); info!("Received CLI request: {:?}", cli);
let response = "OK"; let response = match process_cli_cmd(
if let Err(e) = stream.write_all(response.as_bytes()).await { cli,
error!("Failed to send response: {}", e); params.clone(),
config,
cfg_gateway.clone(),
bus_reciever.clone(),
bus_sender.clone(),
)
.await
{
Ok(response) => response,
Err(er) => {
let error_msg = format!("Error: {}", er);
error!("{}", &error_msg);
error_msg
}
};
for line in response.lines() {
if let Err(er) = stream.write_all(line.as_bytes()).await {
error!("Failed to send response: {}", er);
}
} }
} }
Err(e) => { Err(e) => {
error!("Failed to parse CLI request: {}", e); error!("Failed to parse CLI request: {}", e);
} }
} }
}, }
Err(e) => error!("Failed to read from socket: {}", e), Err(e) => error!("Failed to read from socket: {}", e),
} }
let _ = stream.shutdown().await; let _ = stream.shutdown().await;
} }
async fn process_cli_cmd(
cli: Cli,
params: Arc<PrebootParams>,
global_config: Arc<Processes>,
cfg_gateway: ProcessedConfigGateway,
bus_reciever: ReadyBusReciever,
bus_sender: BusSender,
) -> anyhow::Result<String> {
use noxis_cli::{Commands, ConfigAction};
return match cli.command {
Commands::Config(config) => {
match config.action {
ConfigAction::Show(env) => {
if env.is_env {
Ok(serde_json::to_string_pretty(params.as_ref())?)
} else {
/* */
Ok(serde_json::to_string_pretty(global_config.as_ref())?)
}
}
ConfigAction::Reset => Err(anyhow::Error::msg(
"It's temporarly forbidden to reset current config using CLI-util",
)),
ConfigAction::Local(cfg) => {
if cfg.is_json {
/* */
let new_config = serde_json::from_str::<Processes>(&cfg.config)?;
let new_version = new_config.get_version().to_string();
use super::{config::config_comparing, structs::ConfigActuality};
return match config_comparing(&global_config, &new_config) {
ConfigActuality::Remote => {
let cfg_gateway = cfg_gateway.clone();
tokio::spawn(async move {
let mut lock = cfg_gateway.lock().await;
match lock.take() {
Some(channel) => {
let _ = channel.send(new_config);
}
None => error!(
"Cannot update confif due to channel sender loss"
),
}
});
Ok(format!(
"Ok. Saving and reloading with version {}",
new_version
))
}
_ => Err(anyhow::Error::msg(format!(
"Local config (version: {}) is more actual",
global_config.get_version()
))),
};
} else {
Err(anyhow::Error::msg(
"It's temporarly forbidden to set config in non-json mode",
))
}
}
ConfigAction::Remote => Ok(params.remote_server_url.clone()),
/* */
// _ => Err(anyhow::Error::msg("Unrecognized command from CLI"))
}
}
Commands::Processes => {
use crate::options::structs::bus::{BusMessageContentType, BusMessageDirection};
use crate::utils::metrics::processes::ProcessesQuery;
let _ = bus_sender
.send(BusMessage::Request(
BusMessageDirection::ToSupervisor,
BusMessageContentType::ProcessQuery,
Box::new(ProcessesQuery::QueryAll),
))
.await;
let mut bus = bus_reciever.lock().await;
let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move {
loop {
if let Ok(cont) = bus.try_recv() {
return cont;
}
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
})
.await?;
if let BusMessage::Response(_, _, content) = resp {
let content: Box<dyn Any> = content;
if let Ok(resp) = content.downcast::<anyhow::Result<String>>() {
return Ok((*resp)?);
}
}
Err(anyhow::Error::msg(format!(
"Unknown type of response from the Supervisor"
)))
}
Commands::Process(prc) => {
use crate::options::structs::bus::{
BusMessageContentType, BusMessageDirection, CLiCommand,
};
let proc_name = prc.process;
let req = BusMessage::Request(
BusMessageDirection::ToSupervisor,
BusMessageContentType::Cli,
Box::new(match prc.action {
ProcessAction::Start => InternalCli {
prc: proc_name,
cmd: CLiCommand::Start,
},
ProcessAction::Stop => InternalCli {
prc: proc_name,
cmd: CLiCommand::Stop,
},
ProcessAction::Restart => InternalCli {
prc: proc_name,
cmd: CLiCommand::Restart,
},
ProcessAction::Freeze => InternalCli {
prc: proc_name,
cmd: CLiCommand::Freeze,
},
ProcessAction::Unfreeze => InternalCli {
prc: proc_name,
cmd: CLiCommand::Unfreeze,
},
/* TODO: ALL CMDS */
_ => InternalCli {
prc: proc_name,
cmd: CLiCommand::Restart,
},
}),
);
let mut bus = bus_reciever.lock().await;
bus_sender.send(req).await?;
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move {
loop {
if let Ok(cont) = bus.try_recv() {
return cont;
}
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
})
.await?;
if let BusMessage::Response(_, _, content) = resp {
let content: Box<dyn Any> = content;
if let Ok(resp) = content.downcast::<anyhow::Result<String>>() {
return Ok((*resp)?);
}
}
Err(anyhow::Error::msg(format!(
"Unknown type of response from the Supervisor"
)))
}
Commands::Status => Ok(String::from("Ok")),
Commands::Inspect(mode) => {
use crate::options::structs::bus::{BusMessageContentType, BusMessageDirection};
let mode = mode.mode;
if let Ok(()) = bus_sender
.send(BusMessage::Request(
BusMessageDirection::ToMetrics,
BusMessageContentType::MetricsModeTransfered,
Box::new(mode),
))
.await
{
let mut bus_reciever = bus_reciever.lock().await;
sleep(Duration::from_millis(300)).await;
let resp = tokio::time::timeout(std::time::Duration::from_secs(5), async move {
loop {
if let Ok(cont) = bus_reciever.try_recv() {
return cont;
}
tokio::time::sleep(std::time::Duration::from_millis(500)).await;
}
})
.await?;
if let BusMessage::Response(_, _, content) = resp {
let content: Box<dyn Any> = content;
if let Ok(resp) = content.downcast::<anyhow::Result<String>>() {
return Ok((*resp)?);
}
}
return Err(anyhow::Error::msg(format!(
"Unknown type of response from CLI"
)));
}
Ok(String::from("Ok"))
}
_ => Ok(String::from("Ok")),
};
}

View File

@ -1,84 +1,68 @@
use super::preboot::PrebootParams;
use super::structs::*; use super::structs::*;
use crate::utils::files::create_watcher;
use inotify::EventMask;
use log::{error, info, warn}; use log::{error, info, warn};
use redis::{Client, Connection}; use redis::{Client, Connection};
use std::fs::File;
use std::fs::OpenOptions; use std::fs::OpenOptions;
use std::io::Write; use std::io::Write;
use std::os::unix::process::CommandExt; use std::os::unix::process::CommandExt;
use std::process::Command; use std::process::Command;
use std::sync::Arc; use std::sync::Arc;
use std::{env, fs}; use std::{env, fs};
use super::preboot::PrebootParams; use tokio::sync::{
use tokio::time::{Duration, sleep}; broadcast::Receiver as BroadcastReceiver,
// use redis::PubSub; broadcast::Sender as BroadcastSender,
use tokio::sync::{
oneshot, oneshot,
oneshot::{ Receiver as OneShotReciever, Sender as OneShotSender }, oneshot::{Receiver as OneShotReciever, Sender as OneShotSender},
broadcast::Sender as BroadcastSender, broadcast::Receiver as BroadcastReceiver }; };
use crate::utils::files::create_watcher; use tokio::time::{sleep, Duration};
use std::fs::File;
use inotify::EventMask;
// const CONFIG_PATH: &str = "settings.json"; // const CONFIG_PATH: &str = "settings.json";
pub mod v2 { pub mod v2 {
use std::path::PathBuf;
use crate::utils::get_container_id; use crate::utils::get_container_id;
use std::path::PathBuf;
use super::*; use super::*;
pub async fn init_config_mechanism( pub async fn init_config_mechanism(
// to handle cli config changes // to handle cli config changes
cli_oneshot: OneShotReciever<Processes>, cli_oneshot: OneShotReciever<Processes>,
// to share local config with PRCS, CLI_PIPELINE and CONFIG modules // to share local config with PRCS, CLI_PIPELINE and CONFIG modules
brd_tx : BroadcastSender<Processes>, brd_tx: BroadcastSender<Processes>,
// preboot params (args) // preboot params (args)
params : Arc<PrebootParams> params: Arc<PrebootParams>, /*...*/
/*...*/ ) {
) {
// channel for pubsub to handle local config pulling // channel for pubsub to handle local config pulling
let local_config_brd_reciever = brd_tx.subscribe(); let local_config_brd_reciever = brd_tx.subscribe();
// channel between pub-sub mech and local config mech // channel between pub-sub mech and local config mech
let (tx_pb_lc, rx_pb_lc) = oneshot::channel::<bool>(); let (tx_pb_lc, rx_pb_lc) = oneshot::channel::<bool>();
// channel between cli mech and local config mech // channel between cli mech and local config mech
let (tx_cli_lc, rx_cli_lc) = oneshot::channel::<bool>(); let (tx_cli_lc, rx_cli_lc) = oneshot::channel::<bool>();
// dbg!("before lc"); // dbg!("before lc");
let params_clone = params.clone(); let params_clone = params.clone();
let for_lc_path = params.clone(); let for_lc_path = params.clone();
let lc_path = for_lc_path let lc_path = for_lc_path.config.to_str().unwrap_or("settings.json");
.config
.to_str()
.unwrap_or("settings.json");
// future to init work with local config // future to init work with local config
let lc_future = tokio::spawn( let lc_future = tokio::spawn(
// let params = params.clone(); // let params = params.clone();
local_config_reciever( local_config_reciever(params_clone, rx_pb_lc, rx_cli_lc, Arc::new(brd_tx)),
params_clone,
rx_pb_lc,
rx_cli_lc,
Arc::new(brd_tx)
)
); );
// dbg!("before pb"); // dbg!("before pb");
// future to init work with pub sub mechanism // future to init work with pub sub mechanism
let pubsub_future = tokio::spawn( let pubsub_future = tokio::spawn(pubsub_config_reciever(
pubsub_config_reciever( tx_pb_lc,
tx_pb_lc, params.clone(),
params.clone(), local_config_brd_reciever,
local_config_brd_reciever ));
)
);
// dbg!("before cli"); // dbg!("before cli");
// future to catch new configs from cli pipeline // future to catch new configs from cli pipeline
let cli_future = tokio::spawn( let cli_future = tokio::spawn(from_cli_config_reciever(cli_oneshot, tx_cli_lc));
from_cli_config_reciever(
cli_oneshot,
tx_cli_lc
)
);
// let _ = lc_future.await; // let _ = lc_future.await;
// dbg!("before select"); // dbg!("before select");
tokio::select! { tokio::select! {
@ -107,7 +91,7 @@ pub mod v2 {
Ok(res) => { Ok(res) => {
if res.is_ok() { if res.is_ok() {
info!("New config was saved locally, restarting ..."); info!("New config was saved locally, restarting ...");
} }
else { else {
error!("Pubsub mechanism crushed, restarting ..."); error!("Pubsub mechanism crushed, restarting ...");
} }
@ -136,12 +120,15 @@ pub mod v2 {
} }
// dbg!("after select"); // dbg!("after select");
// TODO! futures + select! [OK] // TODO! futures + select! [OK]
// TODO! tests config // TODO! tests config
} }
pub async fn get_redis_connection(params: &str) -> Option<Connection> { pub async fn get_redis_connection(params: &str) -> Option<Connection> {
for i in 1..=3 { for i in 1..=3 {
let redis_url = format!("redis://{}/", params); let redis_url = format!("redis://{}/", params);
info!("Trying to connect Redis pubsub `{}`. Attempt {}", &redis_url, i); info!(
"Trying to connect Redis pubsub `{}`. Attempt {}",
&redis_url, i
);
if let Ok(client) = Client::open(redis_url) { if let Ok(client) = Client::open(redis_url) {
if let Ok(conn) = client.get_connection() { if let Ok(conn) = client.get_connection() {
info!("Successfully opened Redis connection"); info!("Successfully opened Redis connection");
@ -154,13 +141,13 @@ pub mod v2 {
None None
} }
// loop checking redis pubsub // loop checking redis pubsub
async fn pubsub_config_reciever( async fn pubsub_config_reciever(
// to stop checking local config // to stop checking local config
local_conf_tx : OneShotSender<bool>, local_conf_tx: OneShotSender<bool>,
params : Arc<PrebootParams>, params: Arc<PrebootParams>,
tx_brd_local : BroadcastReceiver<Processes>, tx_brd_local: BroadcastReceiver<Processes>,
) -> anyhow::Result<()>{ ) -> anyhow::Result<()> {
/*...*/ /*...*/
// dbg!("start of pb"); // dbg!("start of pb");
let mut tx_brd_local = tx_brd_local; let mut tx_brd_local = tx_brd_local;
@ -177,23 +164,28 @@ pub mod v2 {
}; };
} }
}; };
match get_redis_connection(&local_config.config_server).await { match get_redis_connection(&params.remote_server_url).await {
Some(mut conn) => { Some(mut conn) => {
let mut pub_sub = conn.as_pubsub(); let mut pub_sub = conn.as_pubsub();
let channel_name = get_container_id().unwrap_or(String::from("default")); let channel_name = get_container_id().unwrap_or(String::from("default"));
let channel_name = channel_name.trim(); match pub_sub.subscribe(&channel_name) {
match pub_sub.subscribe(channel_name) {
Err(er) => { Err(er) => {
error!("Cannot subscribe pubsub channel due to {}", &er); error!("Cannot subscribe pubsub channel due to {}", &er);
return Err(anyhow::Error::msg(format!("Cannot subscribe pubsub channel due to {}", er))) return Err(anyhow::Error::msg(format!(
}, "Cannot subscribe pubsub channel due to {}",
er
)));
}
Ok(_) => { Ok(_) => {
info!("Successfully subscribed to {} pubsub channel", channel_name); info!(
let _ = pub_sub.set_read_timeout(Some(Duration::from_secs(3))); "Successfully subscribed to {} pubsub channel",
&channel_name
);
let _ = pub_sub.set_read_timeout(Some(Duration::from_secs(1)));
loop { loop {
if let Ok(msg) = pub_sub.get_message() { if let Ok(msg) = pub_sub.get_message() {
// dbg!("ok on get message"); // dbg!("ok on get message");
let payload : Result<String, _> = msg.get_payload(); let payload: Result<String, _> = msg.get_payload();
match payload { match payload {
Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "), Err(_) => error!("Cannot read new config from Redis channel. Check network or Redis configuration "),
Ok(payload) => { Ok(payload) => {
@ -224,38 +216,35 @@ pub mod v2 {
}, },
} }
} }
// delay // delay
tokio::task::yield_now().await; tokio::task::yield_now().await;
} }
}, }
} }
}, }
None => { None => {
sleep(Duration::from_secs(20)).await; sleep(Duration::from_secs(20)).await;
} }
} }
Ok(()) Ok(())
} }
// //
async fn local_config_reciever( async fn local_config_reciever(
params : Arc<PrebootParams>, params: Arc<PrebootParams>,
pubsub_oneshot : OneShotReciever<bool>, pubsub_oneshot: OneShotReciever<bool>,
cli_oneshot : OneShotReciever<bool>, cli_oneshot: OneShotReciever<bool>,
brd_tx : Arc<BroadcastSender<Processes>>, brd_tx: Arc<BroadcastSender<Processes>>,
/*...*/ /*...*/
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
/*...*/ /*...*/
// shadowing as mut // shadowing as mut
let mut pubsub_oneshot = pubsub_oneshot; let mut pubsub_oneshot = pubsub_oneshot;
let mut cli_oneshot = cli_oneshot; let mut cli_oneshot = cli_oneshot;
// fill with default empty config, mut to change later // fill with default empty config, mut to change later
let mut _current_config = Processes::default(); let mut _current_config = Processes::default();
// PathBuf to &str to work with local config path as slice // PathBuf to &str to work with local config path as slice
let local_config_path = params let local_config_path = params.config.to_str().unwrap_or("settings.json");
.config
.to_str()
.unwrap_or("settings.json");
match load_processes(local_config_path) { match load_processes(local_config_path) {
// if local exists // if local exists
@ -265,30 +254,30 @@ pub mod v2 {
if let Err(er) = brd_tx.send(_current_config.clone()) { if let Err(er) = brd_tx.send(_current_config.clone()) {
error!("Cannot share local config with broadcast due to {}", er); error!("Cannot share local config with broadcast due to {}", er);
} }
}, }
// if local is not exist // if local is not exist
None => { None => {
warn!("Local config wasn't found. Waiting for new ..."); warn!("Local config wasn't found. Waiting for new ...");
return Err(anyhow::Error::msg("No local config")); return Err(anyhow::Error::msg("No local config"));
// ... // ...
}, }
} }
// 100% local exists here // 100% local exists here
// create watcher on local config file // create watcher on local config file
match create_watcher("", local_config_path) { match create_watcher("", local_config_path) {
Ok(mut watcher) => { Ok(mut watcher) => {
loop { loop {
let mut need_to_export_config = false; let mut need_to_export_config = false;
// let mut need_to_recreate_watcher = false; // let mut need_to_recreate_watcher = false;
// return situations here // return situations here
// 1) oneshot signal // 1) oneshot signal
// 2) if config was deleted -> recreate and fill with current config that is held here // 2) if config was deleted -> recreate and fill with current config that is held here
// 3) if config was changed -> fill with current config that is held here // 3) if config was changed -> fill with current config that is held here
// catching signal from pubsub // catching signal from pubsub
// it's because pubsub mech pulled new valid and actual config and now it's time to ... // it's because pubsub mech pulled new valid and actual config and now it's time to ...
// ... overwrite local config file and restart main thread // ... overwrite local config file and restart main thread
if let Ok(_) = pubsub_oneshot.try_recv() { if let Ok(_) = pubsub_oneshot.try_recv() {
sleep(Duration::from_secs(1)).await; sleep(Duration::from_secs(1)).await;
return Ok(()); return Ok(());
@ -296,7 +285,7 @@ pub mod v2 {
// catching signal from cli // catching signal from cli
// it's because cli mech pulled new valid and actual config and now it's time to ... // it's because cli mech pulled new valid and actual config and now it's time to ...
// ... overwrite local config file and restart main thread (like in previous mechanism) // ... overwrite local config file and restart main thread (like in previous mechanism)
if let Ok(_) = cli_oneshot.try_recv() { if let Ok(_) = cli_oneshot.try_recv() {
sleep(Duration::from_secs(1)).await; sleep(Duration::from_secs(1)).await;
return Ok(()); return Ok(());
@ -313,7 +302,7 @@ pub mod v2 {
} else { } else {
// changes check // changes check
let mut buffer = [0; 128]; let mut buffer = [0; 128];
let events = watcher.read_events(&mut buffer); let events = watcher.read_events(&mut buffer);
if events.is_ok() { if events.is_ok() {
let events: Vec<EventMask> = events let events: Vec<EventMask> = events
.unwrap() .unwrap()
@ -325,16 +314,14 @@ pub mod v2 {
if !events.is_empty() { if !events.is_empty() {
warn!("Local config file was overwritten. Discarding changes ..."); warn!("Local config file was overwritten. Discarding changes ...");
need_to_export_config = true; need_to_export_config = true;
// events
// .iter()
// .any(|event| *event == EventMask::DELETE_SELF)
// .then(|| need_to_recreate_watcher = true);
} }
} }
} }
// exporting data // exporting data
if need_to_export_config { if need_to_export_config {
if let Err(er) = export_saved_config_data_locally(&params.config, &_current_config).await { if let Err(er) =
export_saved_config_data_locally(&params.config, &_current_config).await
{
error!("Cannot save actual imported config due to {}", er); error!("Cannot save actual imported config due to {}", er);
} else { } else {
// recreation watcher (draining activity buffer mechanism) // recreation watcher (draining activity buffer mechanism)
@ -349,66 +336,65 @@ pub mod v2 {
sleep(Duration::from_millis(300)).await; sleep(Duration::from_millis(300)).await;
// tokio::task::yield_now().await; // tokio::task::yield_now().await;
} }
}, }
Err(_) => { Err(_) => {
error!("Cannot create watcher on local config file `{}`. Deinitializing warding local config mechanism...", local_config_path); error!("Cannot create watcher on local config file `{}`. Deinitializing warding local config mechanism...", local_config_path);
return Err(anyhow::Error::msg("Cannot create watcher on local config file")); return Err(anyhow::Error::msg(
}, "Cannot create watcher on local config file",
));
}
} }
} }
// [:IN-TEST] // [:IN-TEST]
async fn from_cli_config_reciever( async fn from_cli_config_reciever(
cli_oneshot: OneShotReciever<Processes>, cli_oneshot: OneShotReciever<Processes>,
to_local_tx: OneShotSender<bool> to_local_tx: OneShotSender<bool>,
) -> Option<Processes> { ) -> Option<Processes> {
/* match awaits til channel*/ /* match awaits til channel*/
// dbg!("start of cli"); // dbg!("start of cli");
loop { loop {
if !cli_oneshot.is_empty() { if !cli_oneshot.is_empty() {
match cli_oneshot.await { match cli_oneshot.await {
Ok(config_from_cli) => { Ok(config_from_cli) => {
info!("New actual config `{}` from CLI was pulled. Saving and restaring ...", &config_from_cli.date_of_creation); info!(
"New actual config `{}` from CLI was pulled. Saving and restaring ...",
&config_from_cli.date_of_creation
);
let _ = to_local_tx.send(true); let _ = to_local_tx.send(true);
return Some(config_from_cli) return Some(config_from_cli);
}, }
_ => return None, _ => return None,
} }
} }
sleep(Duration::from_millis(300)).await; sleep(Duration::from_millis(300)).await;
} }
} }
async fn export_saved_config_data_locally( async fn export_saved_config_data_locally(
config_file_path: &PathBuf, config_file_path: &PathBuf,
current_config: &Processes current_config: &Processes,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let mut file = File::create(config_file_path)?; let mut file = File::create(config_file_path)?;
file.write_all( file.write_all(serde_json::to_string_pretty(current_config)?.as_bytes())?;
serde_json::to_string_pretty(current_config)?.as_bytes()
)?;
Ok(()) Ok(())
// Ok(()) // Ok(())
} }
} }
/// # Fn `load_processes` /// # Fn `load_processes`
/// ## for reading and parsing *local* storing config /// ## for reading and parsing *local* storing config
/// ///
/// *input* : `&str` /// *input* : `&str`
/// ///
/// *output* : `None` if local conf file doesn't exist or invalid | `Some(conf)` on finish reading and parsing /// *output* : `None` if local conf file doesn't exist or invalid | `Some(conf)` on finish reading and parsing
/// ///
/// *initiator* : func `get_actual_config` /// *initiator* : func `get_actual_config`
/// ///
/// *managing* : conf file name in `&str` format /// *managing* : conf file name in `&str` format
/// ///
/// *depends on* : struct `Processes` /// *depends on* : struct `Processes`
/// ///
fn load_processes(json_filename: &str) -> Option<Processes> { fn load_processes(json_filename: &str) -> Option<Processes> {
if let Ok(res) = fs::read_to_string(json_filename) { if let Ok(res) = fs::read_to_string(json_filename) {
if let Ok(conf) = serde_json::from_str::<Processes>(&res) { if let Ok(conf) = serde_json::from_str::<Processes>(&res) {
@ -418,262 +404,9 @@ fn load_processes(json_filename: &str) -> Option<Processes> {
None None
} }
/// # Fn `get_actual_config`
/// ## resolves the most recent Monitor config from the local file and/or Redis
///
/// *input* : `Arc<PrebootParams>` — preboot arguments (config path, no-sub flag, server url)
///
/// *output* : `None` on fatal error in mechanisms | `Some(conf)` on finish reading and parsing
///
/// *initiator* : main thread
///
/// *managing* : -
///
/// *depends on* : struct `Processes`
///
pub async fn get_actual_config(params: Arc<PrebootParams>) -> Option<Processes> {
    // * with a local conf -> one-shot probe of the redis server for a newer copy
    // * without a local conf -> block until the redis server hands one over
    let config_path = params.config.to_str().unwrap_or_else(|| {
        error!("Invalid character in config file. Config path was set to default");
        "settings.json"
    });
    info!("Configurating config module with params: no-sub={}, local config path={:?}, remote server={}", params.no_sub, params.config, params.remote_server_url);
    let redis_url = format!("redis://{}/", &params.remote_server_url);
    let local = match load_processes(config_path) {
        Some(conf) => conf,
        None => {
            warn!("No local valid conf was found. Trying to pull remote one...");
            if params.no_sub {
                return None;
            }
            let mut conn = get_connection_watcher(&open_watcher(&redis_url));
            let pulled = get_remote_conf_watcher(&mut conn).await?;
            info!("Config {} was pulled from Redis-Server. Starting...", &pulled.date_of_creation);
            let _ = save_new_config(&pulled, config_path);
            return Some(pulled);
        }
    };
    info!(
        "Found local configuration, version - {}",
        &local.date_of_creation
    );
    if params.no_sub {
        return Some(local);
    }
    // TODO : rework with pubsub mech
    let remote = match once_get_remote_configuration(&redis_url) {
        Some(conf) => conf,
        None => return Some(local),
    };
    match config_comparing(&local, &remote) {
        ConfigActuality::Local => {
            info!("Local config is actual");
            Some(local)
        }
        ConfigActuality::Remote => {
            info!("Pulled config is more actual. Saving changes!");
            if save_new_config(&remote, config_path).is_err() {
                error!("Saving changes process failed due to unexpected error...")
            }
            Some(remote)
        }
    }
}
/// # Fn `get_remote_conf_watcher`
/// ## for infinitive pulling remote config
///
/// *input* : `&mut Connection`
///
/// *output* : `None` on fatal error | `Some(conf)` on succesfull pulling
///
/// *initiator* : fn `get_actual_config`
///
/// *managing* : mut ref `Connection` object
///
/// *depends on* : struct `Processes`
///
async fn get_remote_conf_watcher(conn : &mut Connection) -> Option<Processes> {
let mut conn = conn.as_pubsub();
let cont = crate::utils::get_container_id();
loop {
match cont {
Some(ref cont) => {
let cont = cont.trim();
if conn.subscribe(cont).is_err() {
// todo : delay
continue;
}
match conn.get_message() {
Ok(msg) => {
let msg: Result<String, redis::RedisError> = msg.get_payload();
if let Ok(payload) = msg {
if let Some(remote) = parse_extern_config(&payload) {
return Some(remote)
}
else {
error!("Pulled invalid config, cannot start. Waiting for remote conf...");
}
} else {
error!("Cannot get Redis message payload. Waiting for remote conf...");
}
// todo : delay
continue;
},
Err(_) => {
// todo : delay
continue;
},
}
},
None => {
error!("Cannot get container id. Returning");
break
},
}
}
None
}
/// # Fn `get_remote_conf_watcher`
/// ## for trying to pull remote config
///
/// > only for situation when local isn't None (no need to fck redis server)
///
/// *input* : `&str`
///
/// *output* : `None` on empty pubsub or error | `Some(conf)` on succesfull pulling
///
/// *initiator* : fn `get_actual_config`
///
/// *managing* : &str of Redis Server credentials
///
/// *depends on* : struct `Processes`
///
fn once_get_remote_configuration(serv_info: &str) -> Option<Processes> {
let cont = crate::utils::get_container_id();
match Client::open(serv_info) {
Ok(client) => {
match client.get_connection() {
Ok(mut conn) => {
let mut conn = conn.as_pubsub();
match conn.subscribe(cont) {
Ok(_) => {
if conn.set_read_timeout(Some(Duration::from_millis(100))).is_err() {
error!("Cannot set reading pubsub timeout and pull remote config");
return None;
}
match conn.get_message() {
Ok(msg) => {
info!("Pulled config from Redis Server");
let get_payload: Result<String, redis::RedisError> = msg.get_payload();
match get_payload {
Ok(payload) => {
let remote = parse_extern_config(&payload);
if remote.is_none() {
error!("Pulled config is invalid. Check it in Redis Server");
}
remote
},
Err(_) => {
error!("Cannot extract payload from new message. Check Redis Server state");
None
},
}
},
Err(_) => {
None
},
}
},
Err(_) => {
error!("Redis subscription process failed. Check Redis configuration!");
None
}
}
}
Err(_) => {
error!("Redis connection attempt is failed. Check Redis configuration!");
None
}
}
}
Err(_) => {
error!("Redis-Client opening attempt is failed. Check network configuration!");
None
}
}
}
// ! watchers
/// # Fn `open_watcher`
/// ## retries opening a Redis client until it succeeds
///
/// > only for situation when local isn't None (no need to fck redis server)
///
/// *input* : `&str` of Redis Server credentials
///
/// *output* : redis::Client on successful opening client
///
/// *initiator* : fn `get_actual_config`
///
/// *managing* : &str of Redis Server credentials
///
/// *depends on* : struct `redis::Client`
///
fn open_watcher(serv_info: &str) -> Client {
    loop {
        // retry with a fixed 4 s pause until the client can be constructed
        if let Ok(redis) = Client::open(serv_info) {
            info!("Successfully opened Redis-Client");
            return redis;
        }
        error!("Redis-Client opening attempt is failed. Check network configuration! Retrying...");
        std::thread::sleep(Duration::from_secs(4));
    }
}
/// # Fn `get_connection_watcher`
/// ## retries establishing a Redis connection on an existing client until it succeeds
///
/// > only for situation when local isn't None (no need to fck redis server)
///
/// *input* : `&Client`
///
/// *output* : `Connection`
///
/// *initiator* : fn `get_actual_config`
///
/// *managing* : &Client for opening connection
///
/// *depends on* : struct `redis::Connection`
///
fn get_connection_watcher(client: &Client) -> Connection {
    loop {
        // retry with a fixed 4 s pause until a connection is handed out
        if let Ok(conn) = client.get_connection() {
            info!("Successfully got Redis connection object");
            return conn;
        }
        error!(
            "Redis connection attempt is failed. Check Redis configuration! Retrying..."
        );
        std::thread::sleep(Duration::from_secs(4));
    }
}
/// # Fn `restart_main_thread` /// # Fn `restart_main_thread`
/// ## for restart monitor with new config /// ## for restart monitor with new config
/// ///
/// *input* : - /// *input* : -
/// ///
/// *output* : `Ok(())` on valid restart | `Err(er)` on error /// *output* : `Ok(())` on valid restart | `Err(er)` on error
@ -683,93 +416,16 @@ fn get_connection_watcher(client: &Client) -> Connection {
/// *managing* : - /// *managing* : -
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
fn restart_main_thread() -> std::io::Result<()> { fn restart_main_thread() -> std::io::Result<()> {
let current_exe = env::current_exe()?; let current_exe = env::current_exe()?;
Command::new(current_exe).exec(); let _ = Command::new(current_exe).exec();
Ok(()) Ok(())
} }
/// # Fn `subscribe_config_stream`
/// ## for subscribe on changes, pulling to Redis pubsub to get more actual config
///
/// *input* : `Arc<Processes>`
///
/// *output* : `Ok(())` on end of work | `Err(er)` on error with subscribing mechanism
///
/// *initiator* : fn `subscribe_config_stream`
///
/// *managing* : `Arc<Processes>` to compare old config with new pulled
///
/// *depends on* : `Processes`
///
pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<PrebootParams>) -> Result<(), CustomError> {
let config_path = params.config.to_str().unwrap_or_else(|| "settings.json");
if params.no_sub {
return Err(CustomError::Fatal);
}
if let Ok(client) = Client::open(format!("redis://{}/", &params.remote_server_url)) {
if let Ok(mut conn) = client.get_connection() {
match crate::utils::get_container_id() {
Some(channel_name) => {
let channel_name = channel_name.trim();
let mut pubsub = conn.as_pubsub();
if pubsub.subscribe(&channel_name).is_ok() {
info!("Runner subscribed on config update publishing in channel {}", &channel_name);
loop {
if let Ok(msg) = pubsub.get_message() {
let get_remote_config: Result<String, redis::RedisError> = msg.get_payload();
match get_remote_config {
Ok(payload) => {
if let Some(remote_config) = parse_extern_config(&payload) {
match config_comparing(&actual_prcs, &remote_config) {
ConfigActuality::Remote => {
warn!("Pulled config is actual. Saving and restarting...");
if save_new_config(&remote_config, config_path).is_err() {
error!("Error with saving new config to {}. Stopping sub mechanism...", config_path);
return Err(CustomError::Fatal);
}
if restart_main_thread().is_err() {
error!("Error with restarting Runner. Stopping sub mechanism...");
return Err(CustomError::Fatal);
}
}
_ => {
warn!("Pulled new config. Current config is more actual ...");
continue
},
}
}
else {
error!("Invalid conig was pulled");
}
},
Err(_) => {
error!("Cannot extract new config from message");
break;
},
}
}
sleep(Duration::from_secs(30)).await;
}
} else {
error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name);
}
},
None => {
error!("Cannot get channel name");
}
}
}
}
error!("Error with subscribing Redis stream on update. Working only with selected config...");
Err(CustomError::Fatal)
}
/// # Fn `config_comparing` /// # Fn `config_comparing`
/// ## for compare old and new configs /// ## for compare old and new configs
/// ///
/// *input* : local: `&Processes`, remote: `&Processes` /// *input* : local: `&Processes`, remote: `&Processes`
/// ///
/// *output* : `ConfigActuality::Local` or `ConfigActuality::Remote` /// *output* : `ConfigActuality::Local` or `ConfigActuality::Remote`
@ -779,8 +435,8 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<Pr
/// *managing* : two objects `&Processes` /// *managing* : two objects `&Processes`
/// ///
/// *depends on* : `Processes`, `ConfigActuality` /// *depends on* : `Processes`, `ConfigActuality`
/// ///
fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActuality { pub fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActuality {
if local.is_default() { if local.is_default() {
return ConfigActuality::Remote; return ConfigActuality::Remote;
} }
@ -793,17 +449,9 @@ fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActuality {
} }
} }
// ! TEMPORARILY DEPRECATED !
// fn native_date_from_millis(mls: &str) -> Option<chrono::DateTime<Utc>> {
// match mls.parse::<i64>(){
// Ok(val) => return chrono::DateTime::from_timestamp_millis(val),
// Err(_) => return None,
// }
// }
/// # Fn `save_new_config` /// # Fn `save_new_config`
/// ## mechanism for saving new config in local storage /// ## mechanism for saving new config in local storage
/// ///
/// *input* : `&Processes`, `&str` /// *input* : `&Processes`, `&str`
/// ///
/// *output* : `Ok(())` on succesfull saving | Err(er) on fs error /// *output* : `Ok(())` on succesfull saving | Err(er) on fs error
@ -813,7 +461,7 @@ fn config_comparing(local: &Processes, remote: &Processes) -> ConfigActuality {
/// *managing* : new config object: `&Processes` and config file name: `&str` /// *managing* : new config object: `&Processes` and config file name: `&str`
/// ///
/// *depends on* : `Processes` /// *depends on* : `Processes`
/// ///
fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomError> { fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomError> {
match serde_json::to_string_pretty(&config) { match serde_json::to_string_pretty(&config) {
// Ok(st) => match fs::write(config_file, st) { // Ok(st) => match fs::write(config_file, st) {
@ -844,7 +492,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr
/// # Fn `parse_extern_config` /// # Fn `parse_extern_config`
/// ## for parsing &str to Processes /// ## for parsing &str to Processes
/// ///
/// *input* : `&str` /// *input* : `&str`
/// ///
/// *output* : parsed config in Some(Processes) | None on error with parsing /// *output* : parsed config in Some(Processes) | None on error with parsing
@ -854,7 +502,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr
/// *managing* : unparsed config `&str` /// *managing* : unparsed config `&str`
/// ///
/// *depends on* : `Processes` /// *depends on* : `Processes`
/// ///
fn parse_extern_config(json_string: &str) -> Option<Processes> { fn parse_extern_config(json_string: &str) -> Option<Processes> {
if let Ok(des) = serde_json::from_str::<Processes>(json_string) { if let Ok(des) = serde_json::from_str::<Processes>(json_string) {
return Some(des); return Some(des);
@ -879,28 +527,20 @@ mod config_unittests {
// old one (kinda local) // old one (kinda local)
let a = Processes { let a = Processes {
date_of_creation: String::from("1"), date_of_creation: String::from("1"),
config_server: String::new(),
processes: vec![], processes: vec![],
}; };
// new one (kinda remote) // new one (kinda remote)
let b = Processes { let b = Processes {
date_of_creation: String::from("2"), date_of_creation: String::from("2"),
config_server: String::new(),
processes: vec![], processes: vec![],
}; };
assert_eq!(config_comparing(&a, &b), ConfigActuality::Remote); assert_eq!(config_comparing(&a, &b), ConfigActuality::Remote);
} }
// TODO : strange output
// #[test]
// fn get_actual_config_mechanism() {
// assert!(get_actual_config().is_some())
// }
#[test] #[test]
fn save_config() { fn save_config() {
let a = Processes { let a = Processes {
date_of_creation: String::from("1"), date_of_creation: String::from("1"),
config_server: String::new(),
processes: vec![], processes: vec![],
}; };
assert!(save_new_config(&a, "tests/examples/save-conf.json").is_ok()); assert!(save_new_config(&a, "tests/examples/save-conf.json").is_ok());
@ -910,7 +550,6 @@ mod config_unittests {
fn save_to_zero_file() { fn save_to_zero_file() {
let a = Processes { let a = Processes {
date_of_creation: String::from("1"), date_of_creation: String::from("1"),
config_server: String::new(),
processes: vec![], processes: vec![],
}; };
assert!(save_new_config(&a, "tests/examples/none.json").is_ok()); assert!(save_new_config(&a, "tests/examples/none.json").is_ok());

View File

@ -15,7 +15,7 @@ use crate::utils::get_container_id;
/// # Fn `setup_logger` /// # Fn `setup_logger`
/// ## for initializing process of unstoppable grubbing metrics. /// ## for initializing process of unstoppable grubbing metrics.
/// ///
/// *input* : `Result<()>` /// *input* : `Result<()>`
/// ///
/// *output* : `Err` if it cant create logger | `Ok` after logger initialing /// *output* : `Err` if it cant create logger | `Ok` after logger initialing
@ -25,7 +25,7 @@ use crate::utils::get_container_id;
/// *managing* : - /// *managing* : -
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> { pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> {
// if Command::new("sh").args(["-c", "mkdir logs"]).output().is_err() { // if Command::new("sh").args(["-c", "mkdir logs"]).output().is_err() {
// println!("Error: Cannot init logs directory"); // println!("Error: Cannot init logs directory");
@ -49,7 +49,7 @@ pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> {
record.args(), record.args(),
) )
}) })
.filter(None, LevelFilter::Info) .filter(None, LevelFilter::from_env())
.target(env_logger::Target::Stdout) .target(env_logger::Target::Stdout)
// temporary deprecated // temporary deprecated
// .target(env_logger::Target::Pipe(log_target)) // .target(env_logger::Target::Pipe(log_target))
@ -58,6 +58,27 @@ pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> {
Ok(()) Ok(())
} }
/// Helper trait to resolve a [`LevelFilter`] from the process environment.
trait FromEnv {
    /// Returns the maximum log level, falling back to `Info`.
    fn from_env() -> LevelFilter;
}

/// Reads `NOXIS_MAX_LOG_LEVEL` (case-insensitive, surrounding whitespace
/// ignored). An unknown value or a missing/non-unicode variable both
/// resolve to `LevelFilter::Info`.
impl FromEnv for LevelFilter {
    fn from_env() -> LevelFilter {
        match std::env::var("NOXIS_MAX_LOG_LEVEL") {
            Ok(var) => match var.to_ascii_lowercase().trim() {
                "trace" => LevelFilter::Trace,
                "debug" => LevelFilter::Debug,
                "info" => LevelFilter::Info,
                "error" => LevelFilter::Error,
                "warn" => LevelFilter::Warn,
                "off" => LevelFilter::Off,
                // fail safe: unrecognized values keep the default level
                _ => LevelFilter::Info,
            },
            // variable not set (or not valid unicode): default level
            Err(_) => LevelFilter::Info,
        }
    }
}
#[cfg(test)] #[cfg(test)]
mod logger_tests { mod logger_tests {
use super::*; use super::*;

View File

@ -1,118 +1,51 @@
// module to handle pre-boot params of the monitor //!
//! Module to handle `pre-boot params` of the monitor (calling also as `settings`)
//!
#[allow(unused_imports)] #[allow(unused_imports)]
use anyhow::{Result, Ok, Error}; use anyhow::{Error, Result};
use clap::Parser;
use std::path::PathBuf;
use std::env::var;
use dotenv::dotenv; use dotenv::dotenv;
use log::warn;
const SOCKET_PATH: &str = "/var/run/enode/hostagent.sock"; use std::env::var;
use std::path::PathBuf;
///
/// Names of the environment variables that mirror the preboot params.
///
/// `Display` renders each variant as its concrete `NOXIS_*` variable name,
/// and the inherent `default` method supplies a per-variant fallback value.
enum EnvVars {
    NoxisNoHagent,
    NoxisNoLogs,
    NoxisRefreshLogs,
    // NoxisNoRemoteConfig,
    NoxisNoConfigSub,
    NoxisSocketPath,
    NoxisLogTo,
    NoxisRemoteServerUrl,
    NoxisConfig,
    NoxisMetrics,
}
///
/// Renders a variant as its concrete `NOXIS_*` environment-variable name.
impl std::fmt::Display for EnvVars {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // map the variant to its variable name, then emit it in one write
        let name = match self {
            EnvVars::NoxisNoHagent => "NOXIS_NO_HAGENT",
            EnvVars::NoxisNoLogs => "NOXIS_NO_LOGS",
            EnvVars::NoxisRefreshLogs => "NOXIS_REFRESH_LOGS",
            // EnvVars::NoxisNoRemoteConfig => "NOXIS_NO_REMOTE_CONFIG",
            EnvVars::NoxisNoConfigSub => "NOXIS_NO_CONFIG_SUB",
            EnvVars::NoxisSocketPath => "NOXIS_SOCKET_PATH",
            EnvVars::NoxisLogTo => "NOXIS_LOG_TO",
            EnvVars::NoxisRemoteServerUrl => "NOXIS_REMOTE_SERVER_URL",
            EnvVars::NoxisConfig => "NOXIS_CONFIG",
            EnvVars::NoxisMetrics => "NOXIS_METRICS",
        };
        write!(f, "{}", name)
    }
}
///
/// Helpers to reconcile preboot params with process environment variables.
impl<'a> EnvVars {
    // Default trait func is not satisfying this issue
    // (the std `Default` trait cannot vary per variant, so a plain consuming
    // method returns each variant's fallback value instead)
    fn default(self) -> &'a str {
        match self {
            EnvVars::NoxisNoHagent => "false",
            EnvVars::NoxisNoLogs => "false",
            EnvVars::NoxisRefreshLogs => "false",
            // EnvVars::NoxisNoRemoteConfig => "false",
            EnvVars::NoxisNoConfigSub => "false",
            EnvVars::NoxisSocketPath => "/var/run/enode/hostagent.sock",
            EnvVars::NoxisLogTo => "./",
            EnvVars::NoxisRemoteServerUrl => "localhost",
            EnvVars::NoxisConfig => "./settings.json",
            EnvVars::NoxisMetrics => "full",
        }
    }
    // Reconciles one env var with the value coming from preboot params:
    // if the var is absent it is set to `preboot_value`; if it is present
    // but differs, it is overwritten with the variant's *default*.
    //
    // NOTE(review): resetting to `self.default()` (not `preboot_value`) on a
    // mismatch looks surprising — confirm this is intentional and not a typo
    // for `preboot_value`.
    fn process_env_var(self, preboot_value: &str) {
        // let default = self.default();
        match var(self.to_string()) {
            std::result::Result::Ok(val) => {
                if val != preboot_value {
                    std::env::set_var(self.to_string(), self.default());
                }
            },
            Err(_) => {
                std::env::set_var(self.to_string(), preboot_value);
            },
        }
    }
    // Pushes every preboot param into its corresponding env var.
    // `unwrap()` on the path conversions assumes the paths are valid UTF-8 —
    // TODO confirm callers guarantee this, otherwise this panics.
    pub fn setup(preboot: &PrebootParams) {
        // setup default if not exists
        // check values and save preboot states in env vars if not equal
        Self::NoxisNoHagent.process_env_var(&preboot.no_hostagent.to_string());
        Self::NoxisNoLogs.process_env_var(&preboot.no_logs.to_string());
        Self::NoxisRefreshLogs.process_env_var(&preboot.refresh_logs.to_string());
        // Self::NoxisNoRemoteConfig.process_env_var(&preboot.no_remote_config.to_string());
        Self::NoxisNoConfigSub.process_env_var(&preboot.no_sub.to_string());
        Self::NoxisSocketPath.process_env_var(preboot.socket_path.to_str().unwrap());
        Self::NoxisLogTo.process_env_var(preboot.log_to.to_str().unwrap());
        Self::NoxisRemoteServerUrl.process_env_var(&preboot.remote_server_url);
        Self::NoxisConfig.process_env_var(preboot.config.to_str().unwrap());
        Self::NoxisMetrics.process_env_var(&preboot.metrics.to_string());
    }
}
/// # Enum `MetricsPrebootParams` /// # Enum `MetricsPrebootParams`
/// ## for setting up metrics mode as preboot param from command prompt /// ## for setting up metrics mode as preboot param from command prompt
/// ///
/// examples: /// examples:
/// ``` bash /// ``` bash
/// noxis-rs ... --metrics full /// noxis-rs ... --metrics full
/// noxis-rs ... --metrics system /// noxis-rs ... --metrics system
/// noxis-rs ... --metrics processes /// noxis-rs ... --metrics processes
/// noxis-rs ... --metrics net /// noxis-rs ... --metrics net
/// noxis-rs ... --metrics none /// noxis-rs ... --metrics none
/// ``` /// ```
/// ///
#[derive(clap::ValueEnum, Debug, Clone)] #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub enum MetricsPrebootParams { pub enum MetricsPrebootParams {
Full, Full,
System, System,
Processes, Processes,
Net, Net,
None, None,
} }
impl MetricsPrebootParams {
    /// Parses a metrics mode from an environment-variable value
    /// (case-insensitive, surrounding whitespace ignored).
    fn from_env(var: &str) -> Self {
        let normalized = var.trim().to_lowercase();
        match normalized.as_str() {
            "system" => Self::System,
            "processes" => Self::Processes,
            "net" => Self::Net,
            "none" => Self::None,
            // "full" and any unrecognized value both map to Full
            _ => Self::Full,
        }
    }
}
/// # `std::fmt::Display` implementation for `MetricsPrebootParams` /// # `std::fmt::Display` implementation for `MetricsPrebootParams`
/// ## to enable parsing object to String /// ## to enable parsing object to String
impl std::fmt::Display for MetricsPrebootParams { impl std::fmt::Display for MetricsPrebootParams {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self { match self {
MetricsPrebootParams::Full => write!(f, "full"), MetricsPrebootParams::Full => write!(f, "full"),
MetricsPrebootParams::System => write!(f, "system"), MetricsPrebootParams::System => write!(f, "system"),
@ -120,260 +53,317 @@ impl std::fmt::Display for MetricsPrebootParams {
MetricsPrebootParams::Net => write!(f, "net"), MetricsPrebootParams::Net => write!(f, "net"),
MetricsPrebootParams::None => write!(f, "none"), MetricsPrebootParams::None => write!(f, "none"),
} }
} }
} }
/// # struct `PrebootParams` /// struct to handle Noxis settings (that were set as `.env` or global env vars)
/// ## to parse and set up all modes as preboot params from command prompt ///
/// /// to parse and set up all modes as preboot params from `.env` or using `export $VAR $VAL` command
/// ### args : ///
/// /// # Settings :
/// `--no-hagent` - to disable hagent work module and set up work mode as autonomous ///
/// ### usage : /// All settings are divided by `actions` and `values`.
/// ``` bash /// 1. `Actions` - true or false on var exist check
/// noxis-rs ... --no-hagent ... /// > `Actions` is not about values, it can handle any data
///
/// 2. `Values` - classic string-driven environment variables
///
/// # `Actions` vars
///
/// 1. `NOXIS_NO_HAGENT` - to disable hagent work module and set up work mode as autonomous
///
/// **usage** :
/// ``` toml
/// ...
/// NOXIS_NO_HAGENT = "random-text"
/// ...
/// ``` /// ```
/// /// or
///
/// `--no-logs` - to disable logging at all
/// ### usage :
/// ``` bash /// ``` bash
/// noxis-rs ... --no-logs ... /// export NOXIS_NO_HAGENT "random-text"
/// ```
///
/// `--refresh-logs` - to truncate logs directory
/// ### usage :
/// ``` bash
/// noxis-rs ... --refresh-logs ...
/// ```
///
/// `--no-sub` - to disable Redis subscribtion mechanism
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-sub ...
/// ```
///
/// `--socket-path` - to set Unix Domain Socket file's directory
/// ### usage :
/// ``` bash
/// noxis-rs ... --socket-path /var/run/enode/hostagent.sock ...
/// ``` /// ```
/// ///
/// `--log-to` - to set directory for logs ///
/// ### usage : /// 2. `NOXIS_NO_LOGS` - to disable logging at all
/// ``` bash /// **usage** :
/// noxis-rs ... --log-to /dir/to/logs/ ... /// ``` toml
/// ...
/// NOXIS_NO_LOGS = "random-text"
/// ...
/// ``` /// ```
/// /// or
/// `--remote-server-url` - to set Redis Server
/// ### usage :
/// ``` bash /// ``` bash
/// noxis-rs ... --remote-server-url 192.168.28.12 ... /// export NOXIS_NO_LOGS "random-text"
/// ``` /// ```
/// ///
/// `--config` - to set Noxis' config full path /// 3. `NOXIS_REFRESH_LOGS` - to truncate logs directory
///
/// ### usage : /// ### usage :
/// ``` bash /// ``` toml
/// noxis-rs ... --config /etc/enode/settings.json ... /// ...
/// NOXIS_REFRESH_LOGS = "random-text"
/// ...
/// ``` /// ```
/// /// or
/// `--metrics` - to set metrics mode /// ``` bash
/// export NOXIS_REFRESH_LOGS "random-text"
/// ```
///
/// 4. `NOXIS_NO_SUB` - to disable Redis subscribtion mechanism
///
/// ### usage : /// ### usage :
/// ``` bash /// ``` toml
/// noxis-rs ... --metrics full ... /// NOXIS_NO_SUB = "random-text"
/// ``` /// ```
#[derive(Debug, Parser)] /// or
/// ``` bash
/// export NOXIS_NO_SUB "random-text"
/// ```
///
/// # `Values` vars
///
/// 1. `NOXIS_HAGENT_SOCKET_PATH` - to set Unix Domain Socket file's directory
///
/// ### usage :
/// ``` toml
/// NOXIS_HAGENT_SOCKET_PATH = "/var/run/example/hostagent.sock"
/// ```
/// or
/// ``` bash
/// export NOXIS_HAGENT_SOCKET_PATH "/var/run/example/hostagent.sock"
/// ```
///
/// 2 `NOXIS_LOG_TO` - to set directory for logs
///
/// ### usage :
/// ``` toml
/// NOXIS_LOG_TO = "/var/log/noxis/noxis.log"
/// ```
/// or
/// ``` bash
/// export NOXIS_LOG_TO "/var/log/noxis/noxis.log"
/// ```
///
/// 3. `NOXIS_REMOTE_SERVER_URL` - to set Redis Server
///
/// ### usage :
/// ``` toml
/// NOXIS_REMOTE_SERVER_URL = "ip.ip.ip.ip:port"
/// ```
/// or
/// ``` bash
/// export NOXIS_REMOTE_SERVER_URL "ip.ip.ip.ip:port"
/// ```
///
/// 4. `NOXIS_CONFIG_PATH` - to set Noxis' config full path
///
/// ### usage :
/// ``` toml
/// NOXIS_CONFIG_PATH = "./settings.json"
/// ```
/// or
/// ``` bash
/// export NOXIS_CONFIG_PATH "./settings.json"
/// ```
///
/// 5. `NOXIS_METRICS_MODE` - to set metrics mode
///
/// ### usage :
/// ``` toml
/// NOXIS_METRICS_MODE = "full"
/// ```
/// or
/// ``` bash
/// export NOXIS_METRICS_MODE "full"
/// ```
///
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct PrebootParams { pub struct PrebootParams {
// actions // pub no_hostagent : bool,
#[arg(
long = "no-hagent",
action,
conflicts_with="socket_path",
help="To disable work with host-agent"
)]
pub no_hostagent : bool,
#[arg(
long = "no-logs",
action,
conflicts_with="log_to",
help="To disable logs"
)]
pub no_logs: bool, pub no_logs: bool,
#[arg( pub refresh_logs: bool,
long = "refresh-logs", pub no_sub: bool,
action, // pub socket_path : PathBuf,
conflicts_with="no_logs", pub log_to: PathBuf,
help="To clear logs directory" pub remote_server_url: String,
)] pub config: PathBuf,
pub refresh_logs : bool,
// #[arg(
// long = "no-remote-config",
// action,
// help="To disable work with remote config server",
// conflicts_with="no_sub")]
// pub no_remote_config : bool,
#[arg(
long = "no-sub",
action,
help="To disable Redis subscription mechanism",
)]
// conflicts_with="no_remote_config"
pub no_sub : bool,
// params (socket_path, log_to, remote_server_url, config)
#[arg(
long = "socket-path",
default_value="/var/run/enode/hostagent.sock",
conflicts_with="no_hostagent",
help="To set .sock file's path used in communication with host-agent"
)]
pub socket_path : PathBuf,
#[arg(
long = "log-to",
default_value="./",
conflicts_with="no_logs",
help="To set a path to logs directory"
)]
pub log_to : PathBuf,
#[arg(
long = "remote-server-url",
default_value="localhost",
conflicts_with="no_sub",
help = "To set url of remote config server using in remote config pulling mechanism"
)]
pub remote_server_url : String,
#[arg(
long = "config",
short,
default_value="settings.json",
help="To set local config file path"
)]
pub config : PathBuf,
// value enum params (metrics)
#[arg(
long = "metrics",
short,
default_value_t=MetricsPrebootParams::Full,
help="To set metrics grubbing mode"
)]
pub metrics: MetricsPrebootParams, pub metrics: MetricsPrebootParams,
pub self_socket: PathBuf,
pub backup_folder: PathBuf,
} }
/// # implementation for `MetricsPrebootParams` /// # implementation for `MetricsPrebootParams`
/// ## to enable validation mechanism /// ## to enable validation mechanism
impl PrebootParams { impl PrebootParams {
pub fn validate(mut self) -> Result<Self> { pub fn validate() -> Self {
dotenv().ok(); dotenv().ok();
if !self.socket_path.exists() && !self.no_hostagent { Self {
if self.socket_path.to_string_lossy() == SOCKET_PATH { // bool
self.no_hostagent = true; // no_hostagent : {
eprintln!("Warning: Socket-file wasn't found. Working without hostagent module..."); // match var("NOXIS_NO_HAGENT") {
} else { // Ok(_) => true,
eprintln!("Warning: Socket-file wasn't found or Noxis can't read it. Socket-file was set to default"); // Err(_) => false,
if !PathBuf::from(SOCKET_PATH).exists() { // }
self.no_hostagent = true; // },
eprintln!("Warning: Socket-file wasn't found. Working without hostagent module..."); no_logs: {
} else { match var("NOXIS_NO_LOGS") {
self.socket_path = PathBuf::from(SOCKET_PATH); Ok(_) => true,
Err(_) => false,
} }
} },
// return Err(Error::msg("Socket-file not found or Noxis can't read it. Cannot start")); refresh_logs: {
match var("NOXIS_REFRESH_LOGS") {
Ok(_) => true,
Err(_) => false,
}
},
no_sub: {
match var("NOXIS_NO_SUB") {
Ok(_) => true,
Err(_) => false,
}
},
// vals
// socket_path : {
// match var("NOXIS_HAGENT_SOCKET_PATH") {
// Ok(val) => PathBuf::from(val),
// Err(_) => PathBuf::from("/var/run/enode/hostagent.sock"),
// }
// },
log_to: {
match var("NOXIS_LOG_TO") {
Ok(val) => PathBuf::from(val),
Err(_) => PathBuf::from("./"),
}
},
remote_server_url: {
match var("NOXIS_REMOTE_SERVER_URL") {
Ok(val) => val,
Err(_) => String::from("localhost"),
}
},
config: {
match var("NOXIS_CONFIG_PATH") {
Ok(val) => PathBuf::from(val),
Err(_) => PathBuf::from("./settings.json"),
}
},
metrics: {
match var("NOXIS_METRICS_MODE") {
Ok(val) => MetricsPrebootParams::from_env(&val),
Err(_) => MetricsPrebootParams::Full,
}
},
self_socket: {
match var("NOXIS_SOCKET_PATH") {
Ok(val) => PathBuf::from(val),
Err(_) => {
let default = std::env::current_dir()
.expect("Crushed on getting current_dir path. Check fs state!");
warn!(
"$NOXIS_SOCKET_PATH wans't set. Default value - {}",
default.display()
);
PathBuf::from(default)
}
}
},
backup_folder: {
match var("NOXIS_BACKUP_FOLDER") {
Ok(val) => {
let path = PathBuf::from(val);
if path.exists() && path.is_dir() {
path
} else {
PathBuf::from(std::env::current_dir()
.expect("Crushed on getting current_dir path. Check fs state!")
)
}
},
Err(_) => {
let default = std::env::current_dir()
.expect("Crushed on getting current_dir path. Check fs state!");
warn!(
"$NOXIS_BACKUP_FOLDER wans't set. Default value - {}",
default.display()
);
PathBuf::from(default)
}
}
},
} }
// existing log dir
if !self.log_to.exists() && !self.no_logs {
eprintln!("Error: Log-Dir not found or Noxis can't read it. LogDir was set to default");
self.refresh_logs = false;
self.log_to = PathBuf::from("./");
// return Err(Error::msg("Log Directory Not Found or Noxis can't read it. Cannot start"));
}
// existing sock file
if !self.config.exists() {
eprintln!("Error: Invalid character in config file. Config path was set to default");
// TODO : ??? wtf is going with 2 paths
let config = PathBuf::from("/etc/enode/noxis/settings.json");
if !config.exists() && self.no_sub {
return Err(Error::msg("Noxis cannot run without config. Create local config or enable pubsub mechanism"));
}
self.config = PathBuf::from("settings.json");
// return Err(Error::msg("Local Config Not Found or Noxis can't read it. Cannot start"));
}
// redis server check
EnvVars::setup(&self);
Ok(self)
} }
} }
// unit tests of preboot params parsing mech // unit tests of preboot params parsing mech
#[cfg(test)] // #[cfg(test)]
mod preboot_unitests{ // mod preboot_unitests{
use super::*; // use super::*;
#[test] // #[test]
fn parsing_zero_args() { // fn parsing_zero_args() {
assert!(PrebootParams::try_parse_from(vec!["runner-rs"]).is_ok()) // assert!(PrebootParams::try_parse_from(vec!["runner-rs"]).is_ok())
} // }
#[test] // #[test]
fn parsing_hagent_valid_args() { // fn parsing_hagent_valid_args() {
assert!(PrebootParams::try_parse_from(vec![ // assert!(PrebootParams::try_parse_from(vec![
"runner-rs", // "runner-rs",
"--socket-path", "/path/to/socket" // "--socket-path", "/path/to/socket"
]).is_ok()) // ]).is_ok())
} // }
#[test] // #[test]
fn parsing_hagent_invalid_args() { // fn parsing_hagent_invalid_args() {
assert!(PrebootParams::try_parse_from(vec![ // assert!(PrebootParams::try_parse_from(vec![
"runner-rs", // "runner-rs",
"--socket-path", "/path/to/socket", // "--socket-path", "/path/to/socket",
"--no-hagent" // "--no-hagent"
]).is_err()) // ]).is_err())
} // }
#[test] // #[test]
fn parsing_log_valid_args() { // fn parsing_log_valid_args() {
assert!(PrebootParams::try_parse_from(vec![ // assert!(PrebootParams::try_parse_from(vec![
"runner-rs", // "runner-rs",
"--log-to", "/path/to/log/dir" // "--log-to", "/path/to/log/dir"
]).is_ok()) // ]).is_ok())
} // }
#[test] // #[test]
fn parsing_log_invalid_args() { // fn parsing_log_invalid_args() {
assert!(PrebootParams::try_parse_from(vec![ // assert!(PrebootParams::try_parse_from(vec![
"runner-rs", // "runner-rs",
"--log-to /path/to/log/dir", // "--log-to /path/to/log/dir",
"--no-logs" // "--no-logs"
]).is_err()) // ]).is_err())
} // }
#[test] // #[test]
fn parsing_config_valid_args() { // fn parsing_config_valid_args() {
assert!(PrebootParams::try_parse_from(vec![ // assert!(PrebootParams::try_parse_from(vec![
"runner-rs", // "runner-rs",
"--no-sub", // "--no-sub",
"--remote-server-url", "redis://127.0.0.1" // "--remote-server-url", "redis://127.0.0.1"
]).is_err()) // ]).is_err())
} // }
// #[test] // // #[test]
// fn parsing_config_invalid_args_noremote_nosub() { // // fn parsing_config_invalid_args_noremote_nosub() {
// assert!(PrebootParams::try_parse_from(vec![ // // assert!(PrebootParams::try_parse_from(vec![
// "runner-rs", // // "runner-rs",
// "--no-remote-config", "--no-sub" // // "--no-remote-config", "--no-sub"
// ]).is_err()) // // ]).is_err())
// } // // }
#[test] // #[test]
fn parsing_config_invalid_args_noremote_remoteurl() { // fn parsing_config_invalid_args_noremote_remoteurl() {
assert!(PrebootParams::try_parse_from(vec![ // assert!(PrebootParams::try_parse_from(vec![
"runner-rs", // "runner-rs",
"--no-sub", // "--no-sub",
"--remote-server-url", "redis://127.0.0.1" // "--remote-server-url", "redis://127.0.0.1"
]).is_err()) // ]).is_err())
} // }
#[test] // #[test]
fn parsing_metrics_args_using_value_enum() { // fn parsing_metrics_args_using_value_enum() {
assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "full"]).is_ok()); // assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "full"]).is_ok());
assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "system"]).is_ok()); // assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "system"]).is_ok());
assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "processes"]).is_ok()); // assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "processes"]).is_ok());
assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "net"]).is_ok()); // assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "net"]).is_ok());
assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "none"]).is_ok()); // assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "none"]).is_ok());
assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "unusual_value"]).is_err()); // assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "unusual_value"]).is_err());
} // }
} // }

View File

@ -1,4 +1,3 @@
use super::structs::CustomError;
use std::sync::Arc; use std::sync::Arc;
use tokio::io; use tokio::io;
use tokio::sync::mpsc; use tokio::sync::mpsc;
@ -11,17 +10,17 @@ type SendersVec = Arc<Vec<Arc<mpsc::Sender<u8>>>>;
/// # Fn set_valid_destructor /// # Fn set_valid_destructor
/// ## for initializing process of unstoppable grubbing metrics. /// ## for initializing process of unstoppable grubbing metrics.
/// ///
/// *input* : `Result<()>` /// *input* : `Result<()>`
/// ///
/// *output* : `Err` if it cant create signals listeners | `Ok` on returning Monitor /// *output* : `Err` if it cant create signals listeners | `Ok` on returning Monitor
/// ///
/// *initiator* : main thread /// *initiator* : main thread
/// ///
/// *managing* : `Arc<Vec<Arc<mpsc::Sender<u8>>>>` /// *managing* : `Arc<Vec<Arc<mpsc::Sender<u8>>>>`
/// ///
/// *depends on* : Sig, Signals /// *depends on* : Sig, Signals
/// ///
pub async fn set_valid_destructor(senders: SendersVec) -> anyhow::Result<()> { pub async fn set_valid_destructor(senders: SendersVec) -> anyhow::Result<()> {
let (mut int, mut term, mut stop) = ( let (mut int, mut term, mut stop) = (
Sig::new(Signals::Sigint, senders.clone()), Sig::new(Signals::Sigint, senders.clone()),
@ -38,9 +37,9 @@ pub async fn set_valid_destructor(senders: SendersVec) -> anyhow::Result<()> {
} }
/// # Enum Signals /// # Enum Signals
/// ## for instancing each managed system signals (such as SIGINT) /// ## for instancing each managed system signals (such as SIGINT)
/// ///
/// > (element needed in Sig constructor's signature) /// > (element needed in Sig constructor's signature)
/// ///
/// *depends on* : - /// *depends on* : -
enum Signals { enum Signals {
Sigint, Sigint,
@ -50,9 +49,9 @@ enum Signals {
/// # Struct Signals /// # Struct Signals
/// ## for instancing each managed system signals (such as SIGINT) /// ## for instancing each managed system signals (such as SIGINT)
/// ///
/// > (needed to construct system signals listener) /// > (needed to construct system signals listener)
/// ///
/// *depends on* : Signals /// *depends on* : Signals
struct Sig { struct Sig {
signal: Signal, signal: Signal,
@ -70,7 +69,7 @@ impl Sig {
} }
} }
/// ## trait Display realization for returning String-name of signal /// ## trait Display realization for returning String-name of signal
/// ///
/// > (needed in logs) /// > (needed in logs)
impl std::fmt::Display for Signals { impl std::fmt::Display for Signals {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
@ -91,20 +90,20 @@ impl Signals {
} }
} }
} }
/// # Trait SigPostProcessing /// # Trait SigPostProcessing
/// ## to handle post-processing jobs after getting system signal /// ## to handle post-processing jobs after getting system signal
/// ///
/// ## > (needed in signals post-processing) /// ## > (needed in signals post-processing)
/// ///
trait SigPostProcessing { trait SigPostProcessing {
async fn post_processing(&mut self) -> io::Result<()>; async fn post_processing(&mut self) -> io::Result<()>;
} }
/// # Trait SigPostProcessing realization for Sig struct /// # Trait SigPostProcessing realization for Sig struct
/// ## to deinitialize Monitor correctly after getting signal /// ## to deinitialize Monitor correctly after getting signal
/// ///
/// ## > (needed in signals post-processing) /// ## > (needed in signals post-processing)
/// ///
impl SigPostProcessing for Sig { impl SigPostProcessing for Sig {
async fn post_processing(&mut self) -> io::Result<()> { async fn post_processing(&mut self) -> io::Result<()> {
// manipulations ... // manipulations ...

View File

@ -1,21 +1,131 @@
#![allow(dead_code)] #![allow(dead_code)]
use std::net::Ipv4Addr;
use serde::{Deserialize, Serialize};
use async_trait::async_trait; use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::net::Ipv4Addr;
use std::sync::Arc; use std::sync::Arc;
pub mod bus {
use std::fmt::Debug;
use super::*;
use crate::utils::metrics::processes::ProcessesQuery;
use crate::utils::metrics::MetricsExportable;
use noxis_cli::{metrics_models::MetricsMode, Cli};
pub type BusMessageContent = Box<dyn BusContent>;
#[derive(Debug)]
pub enum BusMessage {
Request(
BusMessageDirection,
BusMessageContentType,
BusMessageContent,
),
Response(
BusMessageDirection,
BusMessageContentType,
BusMessageContent,
),
}
#[derive(Debug)]
pub enum BusMessageDirection {
ToCli,
ToSupervisor,
ToMetrics,
}
#[derive(Debug)]
pub enum BusMessageContentType {
RawString,
Cli,
MetricsObj,
Result,
MetricsModeTransfered,
ProcessQuery,
}
#[derive(Debug)]
pub enum CLiCommand {
Start,
Stop,
Restart,
Freeze,
Unfreeze,
}
#[derive(Debug)]
pub struct InternalCli {
pub prc: String,
pub cmd: CLiCommand,
}
pub trait BusContent: Send + Sync + 'static + Debug + Any {
fn get_bus_type(&self) -> BusMessageContentType;
}
impl BusContent for anyhow::Result<String> {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::Result
}
}
impl BusContent for String {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::RawString
}
}
impl BusContent for Cli {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::Cli
}
}
impl BusContent for InternalCli {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::Cli
}
}
impl BusContent for dyn MetricsExportable {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::MetricsObj
}
}
impl BusContent for Box<dyn MetricsExportable> {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::MetricsObj
}
}
impl BusContent for MetricsMode {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::MetricsModeTransfered
}
}
impl BusContent for ProcessesQuery {
fn get_bus_type(&self) -> BusMessageContentType {
BusMessageContentType::ProcessQuery
}
}
}
#[derive(Debug)] #[derive(Debug)]
pub enum DependencyType { pub enum DependencyType {
File, File,
Service, Service,
} }
#[derive(Debug)] #[derive(Debug, Serialize, Clone, Copy)]
pub enum ServiceState { pub enum ServiceState {
Ok, Ok,
Unavailable Unavailable,
} }
impl std::fmt::Display for ServiceState {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
return match self {
ServiceState::Ok => write!(f, "Ok"),
ServiceState::Unavailable => write!(f, "Unavailable"),
};
}
}
pub struct ServiceWaitConfig(u32); pub struct ServiceWaitConfig(u32);
impl Default for ServiceWaitConfig { impl Default for ServiceWaitConfig {
@ -25,7 +135,7 @@ impl Default for ServiceWaitConfig {
} }
pub enum FileTriggerType { pub enum FileTriggerType {
OnChange, OnChange,
OnDelete, OnDelete,
} }
@ -34,48 +144,87 @@ impl std::fmt::Display for FileTriggerType {
return match self { return match self {
FileTriggerType::OnChange => write!(f, "File was changed"), FileTriggerType::OnChange => write!(f, "File was changed"),
FileTriggerType::OnDelete => write!(f, "File was moved or deleted"), FileTriggerType::OnDelete => write!(f, "File was moved or deleted"),
} };
} }
} }
impl<'a> FileTriggerType { impl<'a> FileTriggerType {
pub fn event(&self, file_name: Arc<str>, trigger: Arc<str>) -> Events { pub fn event(&self, file_name: Arc<str>, trigger: Arc<str>) -> Events {
return match self { return match self {
FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger)), FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(
FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger)), file_name,
} DependencyType::File,
trigger,
)),
FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(
file_name,
DependencyType::File,
trigger,
)),
};
} }
pub fn event_from_file_trigger_controller(&self, file_name: Arc<str>, trigger: &FileTriggersForController) -> Events { pub fn event_from_file_trigger_controller(
&self,
file_name: Arc<str>,
trigger: &FileTriggersForController,
) -> Events {
return match self { return match self {
FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(file_name, DependencyType::File, trigger.on_change.clone())), FileTriggerType::OnChange => Events::Negative(NegativeOutcomes::FileWasChanged(
FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(file_name, DependencyType::File, trigger.on_delete.clone())), file_name,
} DependencyType::File,
trigger.on_change.clone(),
)),
FileTriggerType::OnDelete => Events::Negative(NegativeOutcomes::FileWasMovedOrDeleted(
file_name,
DependencyType::File,
trigger.on_delete.clone(),
)),
};
} }
} }
#[derive(Debug)] #[derive(Debug)]
pub enum Triggers { pub enum Triggers {
File { on_change: Arc<str>, on_delete: Arc<str> }, File {
Service {on_lost: Arc<str>, wait: u32}, on_change: Arc<str>,
on_delete: Arc<str>,
},
Service {
on_lost: Arc<str>,
wait: u32,
},
} }
impl Triggers { impl Triggers {
pub fn new_file(on_change: Arc<str>, on_delete: Arc<str>) -> Triggers { pub fn new_file(on_change: Arc<str>, on_delete: Arc<str>) -> Triggers {
Triggers::File { on_change, on_delete } Triggers::File {
on_change,
on_delete,
}
} }
pub fn new_service(on_lost: Arc<str>, wait_time: u32) -> Triggers { pub fn new_service(on_lost: Arc<str>, wait_time: u32) -> Triggers {
Triggers::Service{on_lost, wait: wait_time} Triggers::Service {
on_lost,
wait: wait_time,
}
} }
pub fn to_service_negative_event(&self, service_name: Arc<str>) -> Option<Events> { pub fn to_service_negative_event(&self, service_name: Arc<str>) -> Option<Events> {
if let Triggers::Service { on_lost, .. } = self { if let Triggers::Service { on_lost, .. } = self {
return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable(service_name, DependencyType::Service, on_lost.clone()))) return Some(Events::Negative(NegativeOutcomes::ServiceIsUnreachable(
service_name,
DependencyType::Service,
on_lost.clone(),
)));
} }
None None
} }
} }
#[derive(Debug)] #[derive(Debug)]
pub struct FileTriggersForController{ pub on_change: Arc<str>, pub on_delete: Arc<str> } pub struct FileTriggersForController {
pub on_change: Arc<str>,
pub on_delete: Arc<str>,
}
pub struct ServiceTriggersForController(Arc<str>); pub struct ServiceTriggersForController(Arc<str>);
impl std::fmt::Display for DependencyType { impl std::fmt::Display for DependencyType {
@ -83,21 +232,34 @@ impl std::fmt::Display for DependencyType {
return match self { return match self {
DependencyType::File => write!(f, "File"), DependencyType::File => write!(f, "File"),
DependencyType::Service => write!(f, "Service"), DependencyType::Service => write!(f, "Service"),
} };
}
}
#[derive(Debug, serde::Serialize, Clone, Copy)]
pub enum ProcessState {
Pending,
Holding,
Stopped,
StoppedByCli,
HoldingByCli,
}
impl std::fmt::Display for ProcessState {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
return match self {
ProcessState::Pending => write!(f, "Running"),
ProcessState::Holding => write!(f, "Frozen"),
ProcessState::Stopped => write!(f, "Stopped"),
ProcessState::StoppedByCli => write!(f, "Stopped by Admin"),
ProcessState::HoldingByCli => write!(f, "Frozen by Admin"),
};
} }
} }
#[derive(Debug)]
pub enum ProcessState {
Pending,
Holding,
Stopped,
StoppedByCli,
}
#[derive(Debug)] #[derive(Debug)]
pub enum Events { pub enum Events {
Positive(Arc<str>), Positive(Arc<str>),
Negative(NegativeOutcomes) Negative(NegativeOutcomes),
} }
#[derive(Debug)] #[derive(Debug)]
pub enum NegativeOutcomes { pub enum NegativeOutcomes {
@ -123,11 +285,11 @@ pub enum ConfigActuality {
/// # Struct for the 1st level in json conf file /// # Struct for the 1st level in json conf file
/// ## for storing main config data /// ## for storing main config data
/// ///
/// > (needed in serialization and deserialization) /// > (needed in serialization and deserialization)
/// ///
/// *depends on* : `TrackingProcess` /// *depends on* : `TrackingProcess`
/// ///
/// ``` json /// ``` json
/// { /// {
/// -> "dateOfCreation": "1721381809104", /// -> "dateOfCreation": "1721381809104",
@ -141,8 +303,6 @@ pub struct Processes {
// runner_id: usize, // runner_id: usize,
#[serde(rename = "dateOfCreation")] #[serde(rename = "dateOfCreation")]
pub date_of_creation: String, pub date_of_creation: String,
#[serde(rename = "configServer")]
pub config_server: String,
#[serde(default)] #[serde(default)]
pub processes: Vec<TrackingProcess>, pub processes: Vec<TrackingProcess>,
} }
@ -150,9 +310,8 @@ pub struct Processes {
impl Default for Processes { impl Default for Processes {
fn default() -> Self { fn default() -> Self {
Self { Self {
date_of_creation : String::new(), date_of_creation: String::new(),
config_server : String::from("default"), processes: Vec::new(),
processes : Vec::new(),
} }
} }
} }
@ -161,15 +320,18 @@ impl Processes {
pub fn is_default(&self) -> bool { pub fn is_default(&self) -> bool {
self.date_of_creation.is_empty() self.date_of_creation.is_empty()
} }
pub fn get_version(&self) -> &str {
&self.date_of_creation
}
} }
/// # Struct for the 2nd level in json conf file /// # Struct for the 2nd level in json conf file
/// ## for each process to contain info, such as name, path and dependencies /// ## for each process to contain info, such as name, path and dependencies
/// ///
/// > (needed in serialization and deserialization) /// > (needed in serialization and deserialization)
/// ///
/// *depends on* : `Dependencies` /// *depends on* : `Dependencies`
/// ///
/// ``` json /// ``` json
/// ... /// ...
/// "processes": [ /// "processes": [
@ -190,11 +352,11 @@ pub struct TrackingProcess {
/// # Struct for the 3d level in json conf file /// # Struct for the 3d level in json conf file
/// ## for processes' dependencies including files and services /// ## for processes' dependencies including files and services
/// ///
/// > (needed in serialization and deserialization) /// > (needed in serialization and deserialization)
/// ///
/// *depends on* : `Files`, `Services` /// *depends on* : `Files`, `Services`
/// ///
/// ``` json /// ``` json
/// ... /// ...
/// "path": "/home/user/monitor/runner-rs/temp-process", /// "path": "/home/user/monitor/runner-rs/temp-process",
@ -214,11 +376,11 @@ pub struct Dependencies {
/// # Struct for the 4th level in json conf file /// # Struct for the 4th level in json conf file
/// ## for containing file object with its triggers to manipulate in daemons /// ## for containing file object with its triggers to manipulate in daemons
/// ///
/// > (needed in serialization and deserialization) /// > (needed in serialization and deserialization)
/// ///
/// *depends on* : `FileTriggers` /// *depends on* : `FileTriggers`
/// ///
/// ``` json /// ``` json
/// ... /// ...
/// "files": [ /// "files": [
@ -226,7 +388,7 @@ pub struct Dependencies {
/// -> "filename": "dep-file", /// -> "filename": "dep-file",
/// -> "src": "/home/user/monitor/runner-rs/tests/examples/", /// -> "src": "/home/user/monitor/runner-rs/tests/examples/",
/// -> "triggers": { ... } /// -> "triggers": { ... }
/// -> } , /// -> } ,
/// ... /// ...
/// ], ... /// ], ...
/// ``` /// ```
@ -239,11 +401,11 @@ pub struct Files {
/// # Struct for the 4th level in json conf file /// # Struct for the 4th level in json conf file
/// ## for containing service object with its triggers to manipulate in daemons /// ## for containing service object with its triggers to manipulate in daemons
/// ///
/// > (needed in serialization and deserialization) /// > (needed in serialization and deserialization)
/// ///
/// *depends on* : `ServiceTriggers` /// *depends on* : `ServiceTriggers`
/// ///
/// ``` json /// ``` json
/// ... /// ...
/// "services": [ /// "services": [
@ -251,7 +413,7 @@ pub struct Files {
/// -> "hostname" : "ya.ru", /// -> "hostname" : "ya.ru",
/// -> "port" : 443, /// -> "port" : 443,
/// -> "triggers": { ... } /// -> "triggers": { ... }
/// -> } , /// -> } ,
/// ... /// ...
/// ], ... /// ], ...
/// ``` /// ```
@ -264,11 +426,11 @@ pub struct Services {
/// # Struct for the 5th level in json conf file /// # Struct for the 5th level in json conf file
/// ## for instancing each service's policies such as on lost or time to wait till reachable /// ## for instancing each service's policies such as on lost or time to wait till reachable
/// ///
/// > (needed in serialization and deserialization) /// > (needed in serialization and deserialization)
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
/// ``` json /// ``` json
/// ... /// ...
/// "port": 443, /// "port": 443,
@ -288,11 +450,11 @@ pub struct ServiceTriggers {
/// # Struct for the 5th level in json conf file /// # Struct for the 5th level in json conf file
/// ## for instancing each file's policies such as on-delete or onupdate events /// ## for instancing each file's policies such as on-delete or onupdate events
/// ///
/// > (needed in serialization and deserialization) /// > (needed in serialization and deserialization)
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
/// ``` json /// ``` json
/// ... /// ...
/// "src": "/home/user/monitor/runner-rs/tests/examples/", /// "src": "/home/user/monitor/runner-rs/tests/examples/",
@ -308,106 +470,107 @@ pub struct FileTriggers {
pub on_delete: String, pub on_delete: String,
#[serde(rename = "onChange")] #[serde(rename = "onChange")]
pub on_change: String, pub on_change: String,
#[serde(rename = "doRestore")]
pub do_restore: bool,
} }
/// # Metrics struct /// # Metrics struct
/// ## for gathering all system metrics (from container + each process) /// ## for gathering all system metrics (from container + each process)
/// ///
/// > (needed in hagent communication, `?...?`) /// > (needed in hagent communication, `?...?`)
/// ///
/// *depends on* : `ContainerMetrics`, `ProcessMetrics` /// *depends on* : `ContainerMetrics`, `ProcessMetrics`
/// ///
#[derive(Debug, Clone, Serialize,)] #[derive(Debug, Clone, Serialize)]
pub struct Metrics { pub struct Metrics {
pub container_metrics : ContainerMetrics, pub container_metrics: ContainerMetrics,
pub processes_metrics : Vec<ProcessMetrics>, pub processes_metrics: Vec<ProcessMetrics>,
// pub net_metrics : Vec<PacketInfo>, // pub net_metrics : Vec<PacketInfo>,
} }
/// ## Metrics struct's constructor /// ## Metrics struct's constructor
impl Metrics { impl Metrics {
pub fn new(cm: ContainerMetrics, prm: Vec<ProcessMetrics>) -> Self { pub fn new(cm: ContainerMetrics, prm: Vec<ProcessMetrics>) -> Self {
Metrics { Metrics {
container_metrics : cm, container_metrics: cm,
processes_metrics : prm, processes_metrics: prm,
// net_metrics : net, // net_metrics : net,
} }
} }
} }
/// # Container metrics struct /// # Container metrics struct
/// ## for gathering all container metrics /// ## for gathering all container metrics
/// ///
/// > (needed in gathering metrics) /// > (needed in gathering metrics)
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct ContainerMetrics { pub struct ContainerMetrics {
container_id : String, container_id: String,
cpu_load : f32, cpu_load: f32,
ram_load : f32, ram_load: f32,
// pub net_activity : ??? // pub net_activity : ???
processes : Vec<String>, processes: Vec<String>,
} }
/// ## Container struct's constructor /// ## Container struct's constructor
impl ContainerMetrics { impl ContainerMetrics {
pub fn new(container_id : &str, cpu: f32, ram: f32, subsystems: Vec<String>,) -> Self{ pub fn new(container_id: &str, cpu: f32, ram: f32, subsystems: Vec<String>) -> Self {
ContainerMetrics { ContainerMetrics {
container_id : String::from(container_id), container_id: String::from(container_id),
cpu_load : cpu, cpu_load: cpu,
ram_load : ram, ram_load: ram,
processes : subsystems, processes: subsystems,
} }
} }
} }
/// # Process metrics struct /// # Process metrics struct
/// ## for gathering each process's all metrics /// ## for gathering each process's all metrics
/// ///
/// > (needed in gathering metrics) /// > (needed in gathering metrics)
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct ProcessMetrics { pub struct ProcessMetrics {
pub process_name : String, pub process_name: String,
cpu_load : f32, cpu_load: f32,
ram_load : f32, ram_load: f32,
} }
/// ## Process struct's constructor /// ## Process struct's constructor
impl ProcessMetrics { impl ProcessMetrics {
pub fn new(process_name :&str, cpu: f32, ram: f32) -> Self { pub fn new(process_name: &str, cpu: f32, ram: f32) -> Self {
ProcessMetrics { ProcessMetrics {
process_name : String::from(process_name), process_name: String::from(process_name),
cpu_load : cpu, cpu_load: cpu,
ram_load : ram, ram_load: ram,
} }
} }
} }
/// # Packet info struct /// # Packet info struct
/// ## for gathering info about container's net activity /// ## for gathering info about container's net activity
/// ///
/// > (needed in gathering metrics) /// > (needed in gathering metrics)
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct PacketInfo { pub struct PacketInfo {
protocol : String, protocol: String,
dst_ip : Ipv4Addr, dst_ip: Ipv4Addr,
src_ip : Ipv4Addr, src_ip: Ipv4Addr,
size : usize, size: usize,
} }
/// ## PacketInfo's constructor /// ## PacketInfo's constructor
impl PacketInfo { impl PacketInfo {
pub fn new(prt: String, dest: Ipv4Addr, src: Ipv4Addr, size_of_packet: usize) -> Self { pub fn new(prt: String, dest: Ipv4Addr, src: Ipv4Addr, size_of_packet: usize) -> Self {
PacketInfo { PacketInfo {
protocol : prt, protocol: prt,
dst_ip : dest, dst_ip: dest,
src_ip : src, src_ip: src,
size : size_of_packet, size: size_of_packet,
} }
} }
} }

View File

@ -1,39 +1,44 @@
pub mod bus;
pub mod files; pub mod files;
pub mod hagent; pub mod hagent;
pub mod metrics; pub mod metrics;
pub mod prcs; pub mod prcs;
pub mod services; pub mod services;
// TODO : saving current flags state use crate::options::structs::bus::{BusMessage, BusMessageContentType, InternalCli};
use crate::options::structs::Processes;
use crate::options::structs::{CustomError, TrackingProcess, Processes}; use async_trait::async_trait;
// use files::create_watcher; use files::v2::FilesController;
// use files::file_handler; use lazy_static::lazy_static;
// use inotify::Inotify; use log::{error, info};
use log::{error, warn, info}; use prcs::v2::ProcessesController;
use prcs::{ use services::v2::ServicesController;
freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process,
unfreeze_process,
};
// use services::service_handler;
use std::process::Command; use std::process::Command;
use std::sync::Arc; use std::sync::Arc;
// use tokio::join;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio::time::Duration; use tokio::time::Duration;
// use tokio::sync::mpsc::{Receiver as MpscReciever, Sender as MpscSender};
// controllers import
use prcs::v2::ProcessesController;
use files::v2::FilesController;
use services::v2::ServicesController;
use async_trait::async_trait;
const GET_ID_CMD: &str = "hostname"; lazy_static! {
static ref GET_ID_CMD: &'static str = "hostname";
}
// const GET_ID_CMD: &str = "hostname";
pub mod v2 { pub mod v2 {
use std::collections::{BTreeMap, HashMap, LinkedList, VecDeque};
use crate::options::structs::{Events, FileTriggersForController, ProcessUnit, Triggers};
use super::*; use super::*;
use crate::options::preboot::PrebootParams;
use crate::utils::metrics::processes::{ProcessesAll, ProcessesQuery};
use crate::{
options::structs::{
bus::CLiCommand, Events, FileTriggersForController, ProcessUnit, Triggers,
},
utils::metrics::processes::deps::{Dependencies, FilesExtended, ServicesExtended},
};
use std::any::Any;
use std::collections::{BTreeMap, HashMap, LinkedList, VecDeque};
type BusReciever = tokio::sync::mpsc::Receiver<BusMessage>;
type BusSender = Arc<tokio::sync::mpsc::Sender<BusMessage>>;
#[derive(Debug)] #[derive(Debug)]
enum ControllerResult { enum ControllerResult {
@ -44,19 +49,31 @@ pub mod v2 {
#[derive(Debug)] #[derive(Debug)]
struct Supervisor { struct Supervisor {
prcs : LinkedList<ProcessesController>, prcs: LinkedList<ProcessesController>,
files : LinkedList<FilesController>, files: LinkedList<FilesController>,
services : LinkedList<ServicesController>, services: LinkedList<ServicesController>,
config: Arc<Processes>,
bus: (BusReciever, BusSender),
} }
impl Supervisor { impl Supervisor {
pub fn new() -> Supervisor { pub fn new(bus_reciever: BusReciever, bus_sender: BusSender) -> Supervisor {
Supervisor { prcs: LinkedList::new(), files: LinkedList::new(), services: LinkedList::new()} Supervisor {
prcs: LinkedList::new(),
files: LinkedList::new(),
services: LinkedList::new(),
config: Arc::new(Processes::default()),
bus: (bus_reciever, bus_sender),
}
} }
pub async fn with_config(mut self, config: &Processes) -> Supervisor { pub async fn with_config(
let _ = config.processes.iter() mut self,
.for_each(|prc| { config: Processes,
let (rx, tx) = mpsc::channel::<Events>(10); preboot : Arc<PrebootParams>
) -> Supervisor {
self.config = Arc::from(config);
let _ = self.config.processes.iter().for_each(|prc| {
let (rx, tx) = mpsc::channel::<Events>(100);
let temp = ProcessesController::new(&prc.name, tx).with_exe(&prc.path); let temp = ProcessesController::new(&prc.name, tx).with_exe(&prc.path);
if !self.prcs.contains(&temp) { if !self.prcs.contains(&temp) {
self.prcs.push_back(temp); self.prcs.push_back(temp);
@ -64,15 +81,29 @@ pub mod v2 {
let rx = Arc::new(rx); let rx = Arc::new(rx);
let proc_name: Arc<str> = Arc::from(prc.name.clone()); let proc_name: Arc<str> = Arc::from(prc.name.clone());
let _ = prc.dependencies.files.iter() let _ = prc.dependencies.files.iter().for_each(|file| {
.for_each(|file| {
let mut hm = HashMap::new(); let mut hm = HashMap::new();
let triggers = FileTriggersForController { on_change: Arc::from(file.triggers.on_change.clone()), on_delete: Arc::from(file.triggers.on_delete.clone())}; let triggers = FileTriggersForController {
on_change: Arc::from(file.triggers.on_change.clone()),
on_delete: Arc::from(file.triggers.on_delete.clone()),
};
hm.insert(proc_name.clone(), (triggers, rx.clone())); hm.insert(proc_name.clone(), (triggers, rx.clone()));
let tempfile = FilesController::new(&file.filename.as_str(), hm) let backup_file = {
.with_path(&file.src); if file.triggers.do_restore {
use ulid::Ulid;
format!("{}{}.bak", {
let path = preboot.backup_folder.to_string_lossy();
if path.ends_with("/") { path.to_string() }
else { format!("{}/", path) }
}, Ulid::new())
} else {
String::new()
}
};
let tempfile =
FilesController::new(&file.filename.as_str(), hm).with_path(&file.src, backup_file);
if let Ok(file) = tempfile { if let Ok(file) = tempfile {
if let Some(current_file) = self.files.iter_mut().find(|a| &&file == a) { if let Some(current_file) = self.files.iter_mut().find(|a| &&file == a) {
@ -82,17 +113,15 @@ pub mod v2 {
} }
} }
}); });
// servs // servs
let _ = prc.dependencies.services.iter() let _ = prc.dependencies.services.iter().for_each(|serv| {
.for_each(|serv| { let access_url =
let access_url = ServicesController::get_access_url(&serv.hostname, serv.port.as_ref()); ServicesController::get_access_url(&serv.hostname, serv.port.as_ref());
// preparations // preparations
let rx = rx.clone(); let rx = rx.clone();
let serv_cont = ServicesController::new().with_access_name( let serv_cont =
&serv.hostname, ServicesController::new().with_access_name(&serv.hostname, &access_url);
&access_url
);
// triggers // triggers
let arc: Arc<str> = Arc::from(serv.triggers.on_lost.clone()); let arc: Arc<str> = Arc::from(serv.triggers.on_lost.clone());
let triggers = Triggers::new_service(arc, serv.triggers.wait); let triggers = Triggers::new_service(arc, serv.triggers.wait);
@ -104,12 +133,13 @@ pub mod v2 {
let mut vec: VecDeque<Arc<str>> = VecDeque::new(); let mut vec: VecDeque<Arc<str>> = VecDeque::new();
vec.push_back(proc_name.clone()); vec.push_back(proc_name.clone());
// connection_queue // connection_queue
let mut connection_queue: BTreeMap<u32, VecDeque<Arc<str>>> = BTreeMap::new(); let mut connection_queue: BTreeMap<u32, VecDeque<Arc<str>>> =
BTreeMap::new();
connection_queue.insert(serv.triggers.wait, vec); connection_queue.insert(serv.triggers.wait, vec);
// event_reg // event_reg
let mut hm = HashMap::new(); let mut hm = HashMap::new();
hm.insert(proc_name.clone(), (triggers, rx)); hm.insert(proc_name.clone(), (triggers, rx));
let serv_cont = serv_cont.with_params(connection_queue, hm); let serv_cont = serv_cont.with_params(connection_queue, hm);
self.services.push_back(serv_cont); self.services.push_back(serv_cont);
} }
@ -118,42 +148,211 @@ pub mod v2 {
self self
} }
pub fn get_stats(&self) -> String { pub fn get_stats(&self) -> String {
format!("processes: {}, files: {}, services: {}", self.prcs.len(),self.files.len(), self.services.len()) format!(
"processes: {}, files: {}, services: {}",
self.prcs.len(),
self.files.len(),
self.services.len()
)
}
pub async fn extract_extended_procs(
config: Arc<Processes>,
prcs_list: &LinkedList<ProcessesController>,
files_list: &LinkedList<FilesController>,
servs_list: &LinkedList<ServicesController>,
) -> Vec<ProcessesAll> {
let mut procs = Vec::new();
for prc in config.processes.iter() {
if let Some(prc_cont) = prcs_list
.iter()
.find(|&prc_cont| prc.name == *prc_cont.name)
{
let mut vec_files = Vec::new();
let mut vec_services = Vec::new();
prc.dependencies
.files
.iter()
.map(|file| (file, format!("{}{}", file.src, file.filename)))
.for_each(|(file, code_name)| {
if let Some(file_cont) = files_list
.iter()
.find(|&file_cont| *file_cont.get_code_name() == code_name)
{
vec_files.push(FilesExtended {
name: file.filename.to_string(),
path: file.src.to_string(),
status: file_cont.get_state(),
backup_file : file_cont.get_backup_file(),
triggers: file.triggers.to_owned(),
});
}
});
prc.dependencies
.services
.iter()
.map(|serv| {
(
serv,
format!("{}{}", serv.hostname, {
if let Some(port) = serv.port {
format!(":{}", port)
} else {
String::new()
}
}),
)
})
.for_each(|(serv, acces_url)| {
if let Some(serv_cont) = servs_list
.iter()
.find(|&serv_cont| *serv_cont.get_arc_access_url() == acces_url)
{
vec_services.push(ServicesExtended {
name: serv.hostname.to_owned(),
access_name: (*serv_cont.get_arc_access_url()).to_owned(),
status: serv_cont.get_state(),
triggers: serv.triggers.to_owned(),
});
}
});
procs.push(ProcessesAll {
name: prc_cont.name.clone().to_string(),
state: prc_cont.get_state(),
pid: prc_cont.get_pid(),
dependencies: Dependencies {
files: vec_files,
services: vec_services,
},
});
}
}
procs
} }
} }
#[async_trait] #[async_trait]
impl ProcessUnit for Supervisor { impl ProcessUnit for Supervisor {
async fn process(&mut self) { async fn process(&mut self) {
info!("Initializing monitoring ..."); info!("Initializing monitoring ...");
loop { loop {
// dbg!(&self); //
let rec = &mut self.bus.0;
while let Ok(request) = rec.try_recv() {
if let BusMessage::Request(_, _, cont) = request {
let cont: Box<dyn Any + Send> = cont;
match cont.downcast::<InternalCli>() {
Ok(cli) => {
let mut count = 0;
let fut = (&mut self.prcs)
.into_iter()
.find(|prc| prc.name == Arc::from(cli.prc.as_ref()))
.map(|prc| async {
let count = &mut count;
*count += 1;
let res = match cli.cmd {
CLiCommand::Start => prc.start_by_user_call().await,
CLiCommand::Stop => prc.stop_by_user_call().await,
CLiCommand::Restart => prc.restart_by_user_call().await,
CLiCommand::Freeze => prc.freeze_by_user_call().await,
CLiCommand::Unfreeze => {
prc.unfreeze_by_user_call().await
}
};
let sender = self.bus.1.clone();
let resp_content = match res {
Ok(_) => Ok(format!(
"Ok on user call abour process {}",
prc.name
)),
Err(er) => Err(anyhow::Error::msg(format!(
"Error: User call for process {} failed : {}",
prc.name, er
))),
};
let _ = sender.send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::new(resp_content)
)).await;
1
});
if let Some(fut) = fut {
fut.await;
} else {
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToCli,
BusMessageContentType::RawString,
Box::new(
Err(anyhow::Error::msg(format!("No process named `{}` was found in controlled scope", cli.prc)))
)
)).await;
}
}
Err(boxed) => {
if let Ok(query) = boxed.downcast::<ProcessesQuery>() {
match *query {
ProcessesQuery::QueryAll => {
let procs = Self::extract_extended_procs(
self.config.clone(),
&self.prcs,
&self.files,
&self.services,
)
.await;
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToMetrics,
BusMessageContentType::ProcessQuery,
Box::new(
ProcessesQuery::All(procs)
)
)).await;
}
ProcessesQuery::QueryGeneral => {
let mut vec = Vec::new();
for prc in &self.prcs {
vec.push(prc.get_general_info().await);
}
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToMetrics,
BusMessageContentType::ProcessQuery,
Box::new(
ProcessesQuery::General(vec)
)
)).await;
}
_ => {
let _ = self.bus.1.clone().send(BusMessage::Response(
crate::options::structs::bus::BusMessageDirection::ToCli,
BusMessageContentType::RawString,
Box::new(
Err(anyhow::Error::msg("Unknown request format was send to the Supervisor"))
)
)).await;
}
}
}
}
}
}
}
let mut tasks: Vec<tokio::task::JoinHandle<ControllerResult>> = vec![]; let mut tasks: Vec<tokio::task::JoinHandle<ControllerResult>> = vec![];
// let (mut prc, mut file, mut serv) = (self.prcs.pop_front().unwrap(), self.files.pop_front().unwrap(), self.services.pop_front().unwrap());
// let res = tokio::join!(prc.process(), file.process(), serv.process());
if let Some(mut val) = self.prcs.pop_front() { if let Some(mut val) = self.prcs.pop_front() {
tasks.push( tasks.push(tokio::spawn(async move {
tokio::spawn( async move { val.process().await;
val.process().await; ControllerResult::Process(Some(val))
ControllerResult::Process(Some(val)) }));
})
);
} }
if let Some(mut val) = self.files.pop_front() { if let Some(mut val) = self.files.pop_front() {
tasks.push( tasks.push(tokio::spawn(async move {
tokio::spawn( async move { val.process().await;
val.process().await; ControllerResult::File(Some(val))
ControllerResult::File(Some(val)) }));
})
);
} }
if let Some(mut val) = self.services.pop_front() { if let Some(mut val) = self.services.pop_front() {
tasks.push( tasks.push(tokio::spawn(async move {
tokio::spawn( async move { val.process().await;
val.process().await; ControllerResult::Service(Some(val))
ControllerResult::Service(Some(val)) }));
})
);
} }
for task in tasks { for task in tasks {
match task.await { match task.await {
@ -169,254 +368,25 @@ pub mod v2 {
} }
} }
// spawn tasks
// spawn prc
// spawn files
// spawn services
// ## for ... i.await in loop
pub async fn init_monitoring( pub async fn init_monitoring(
config: Processes config: Processes,
) -> anyhow::Result<()> { preboot : Arc<PrebootParams>,
let mut supervisor = Supervisor::new().with_config(&config).await; bus_reciever: BusReciever,
bus_sender: BusSender,
) -> anyhow::Result<()> {
let mut supervisor = Supervisor::new(bus_reciever, bus_sender)
.with_config(config, preboot)
.await;
info!("Monitoring: {} ", &supervisor.get_stats()); info!("Monitoring: {} ", &supervisor.get_stats());
supervisor.process().await; supervisor.process().await;
Ok(()) Ok(())
} }
// async fn generate_controllers<'a>(config: Processes) -> (HashSet<ProcessesController<'a>>, HashSet<FilesController<'a>>, HashSet<ServicesController<'a>>) {
// let mut prcs: HashSet<ProcessesController<'a>> = HashSet::new();
// let mut files: HashSet<FilesController<'a>> = HashSet::new();
// let mut services: HashSet<ServicesController<'a>> = HashSet::new();
// for prc in config.processes {
// let (rx, tx) = mpsc::channel::<Events<'a>>(10);
// // let new_prc = ProcessesController::new(&prc.name, tx).with_exe(prc.path);
// let mut new_prc = ProcessesController::new("&prc.name", tx).with_exe(prc.path);
// let a = new_prc.process().await;
// }
// (prcs, files, services)
// }
// spawn prc check with semaphore check
// NOTE(review): stub — returns Ok(()) without doing any work; presumably a
// placeholder for a future process-check daemon. Confirm before relying on it.
async fn prcs_monitoriing() -> anyhow::Result<()> { Ok(()) }
// spawn file check with semaphore check
// NOTE(review): stub — returns Ok(()) without doing any work; presumably a
// placeholder for a future file-check daemon. Confirm before relying on it.
async fn files_monitoriing() -> anyhow::Result<()> { Ok(()) }
// spawn service check with semaphore check
// NOTE(review): stub — returns Ok(()) without doing any work; presumably a
// placeholder for a future service-check daemon. Confirm before relying on it.
async fn services_monitoriing() -> anyhow::Result<()> { Ok(()) }
} }
/// # Fn `run_daemons`
/// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to given messages into channel
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `&mut mpsc::Receiver<u8>`,
///
/// *output* : ()
///
/// *initiator* : main thread
///
/// *managing* : Arc to current process struct, Arc to managing channel writer, mut ref to managing channel reader
///
/// *depends on* : all module `prcs`'s functions, fn `running_handler`, fn `utils::files::create_watcher`
///
/// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process
///
// pub async fn run_daemons(
// proc: Arc<TrackingProcess>,
// tx: Arc<mpsc::Sender<u8>>,
// rx: &mut mpsc::Receiver<u8>,
// ) {
// // creating watchers + ---buffers---
// let mut watchers: Vec<Inotify> = vec![];
// for file in proc.dependencies.files.clone().into_iter() {
// if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
// watchers.push(watcher);
// } else {
// let _ = tx.send(121).await;
// }
// // watchers.push(create_watcher(&file.filename, &file.src).await.unwrap());
// }
// let watchers_clone: Arc<tokio::sync::Mutex<Vec<Inotify>>> =
// Arc::new(tokio::sync::Mutex::new(watchers));
// loop {
// let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone());
// tokio::select! {
// _ = run_hand => continue,
// _val = rx.recv() => {
// if process_protocol_symbol(proc.clone(), _val.unwrap()).await.is_err() {
// return;
// }
// },
// }
// tokio::task::yield_now().await;
// }
// }
/// # Fn `process_protocol_symbol`
/// ## Reacts to one protocol byte received on the managing channel: depending
/// ## on the code it terminates, freezes or unfreezes the tracked process and
/// ## sleeps briefly to debounce.
///
/// *input* : `Arc<TrackingProcess>` — the process the byte refers to, `u8` — protocol code
///
/// *output* : `Ok(())` to keep the caller's loop running | `Err(CustomError::Fatal)` when the thread must die
///
/// *initiator* : fn `run_daemons`
///
/// *depends on* : `is_active`, `is_frozen`, `terminate_process`, `freeze_process`,
/// `unfreeze_process`, `restart_process` from module `prcs`
///
/// > NOTE(review): unknown codes fall through the `_` arm silently — presumably
/// > intentional (ignore noise on the channel); confirm against the senders.
async fn process_protocol_symbol(proc: Arc<TrackingProcess>, val: u8) -> Result<(), CustomError>{
    match val {
        // 1 - File-dependency handling error -> terminating (after waiting)
        1 => {
            if is_active(&proc.name).await {
                error!("File-dependency handling error: Terminating {} process ..." , &proc.name);
                terminate_process(&proc.name).await;
                // debounce: give the process time to actually die
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
            // return;
        },
        // 2 - File-dependency handling error -> holding (after waiting)
        2 => {
            if !is_frozen(&proc.name).await {
                error!("File-dependency handling error: Freezing {} process ..." , &proc.name);
                freeze_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(100)).await;
            }
        },
        // 3 - Running process error -> fatal, caller must stop this thread
        3 => {
            error!("Error due to starting {} process", &proc.name);
            return Err(CustomError::Fatal)
        },
        // 4 - Timeout of waiting service-dependency -> staying (after waiting)
        4 => {
            // warn!("Timeout of waiting service-dependency: Ignoring on {} process ..." , &proc.name);
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 5 - Timeout of waiting service-dependency -> terminating (after waiting)
        5 => {
            if is_active(&proc.name).await {
                error!("Timeout of waiting service-dependency: Terminating {} process ..." , &proc.name);
                terminate_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
        },
        // 6 - Timeout of waiting service-dependency -> holding (after waiting)
        6 => {
            // println!("holding {}-{}", proc.name, is_active(&proc.name).await);
            if !is_frozen(&proc.name).await {
                error!("Timeout of waiting service-dependency: Freezing {} process ..." , &proc.name);
                freeze_process(&proc.name).await;
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        },
        // // 7 - File-dependency change -> terminating (after check); fatal for this thread
        7 => {
            error!("File-dependency warning (file changed). Terminating {} process...", &proc.name);
            terminate_process(&proc.name).await;
            tokio::time::sleep(Duration::from_millis(100)).await;
            return Err(CustomError::Fatal)
        },
        // // 8 - File-dependency change -> restarting (after check)
        8 => {
            warn!("File-dependency warning (file changed). Restarting {} process...", &proc.name);
            // restart outcome deliberately ignored: next loop iteration re-checks state
            let _ = restart_process(&proc.name, &proc.path).await;
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // // 9 - File-dependency change -> staying (after check)
        9 => {
            warn!("File-dependency warning (file changed). Ignoring event on {} process...", &proc.name);
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 10 - Process unfreeze call via file handler (or service handler)
        10 | 11 => {
            if is_frozen(&proc.name).await {
                warn!("Unfreezing process {} call...", &proc.name);
                unfreeze_process(&proc.name).await;
            }
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 11 - Process unfreeze call via service handler (merged into 10|11 above)
        // 11 => {
        //     if is_frozen(&proc.name).await {
        //         warn!("Unfreezing process {} call...", &proc.name);
        //         unfreeze_process(&proc.name).await;
        //     }
        //     tokio::time::sleep(Duration::from_millis(100)).await;
        // },
        // 101 - Impermissible trigger values in JSON -> fatal
        101 => {
            error!("Impermissible trigger values in JSON in {}'s block. Killing thread...", &proc.name);
            if is_active(&proc.name).await {
                terminate_process(&proc.name).await;
            }
            return Err(CustomError::Fatal)
        },
        //
        // 121 - Cannot create valid watcher for file dependency -> fatal
        // todo : think about valid situation
        121 => {
            error!("Cannot create valid watcher for file dependency. Terminating {} process...", &proc.name);
            let _ = terminate_process(&proc.name).await;
            return Err(CustomError::Fatal)
        },
        // 111 - global thread termination with killing current child in a face
        // of a current process
        111 => {
            warn!("Terminating {}'s child processes...", &proc.name);
            match is_active(&proc.name).await {
                true => {
                    terminate_process(&proc.name).await;
                },
                false => {
                    log::info!("Process {} is already terminated!", proc.name);
                },
            }
        },
        _ => {},
    }
    Ok(())
}
// check process status daemon
/// # Fn `running_handler`
/// ## func to async exec subjobs of checking process, services and files states
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `Arc<tokio::sync::Mutex<Vec<Inotify>>>`
///
/// *output* : ()
///
/// *initiator* : fn `run_daemons`
///
/// *managing* : Arc to current process struct, Arc to Mutex to list of file watchers
///
/// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}`
///
// pub async fn running_handler(
// prc: Arc<TrackingProcess>,
// tx: Arc<mpsc::Sender<u8>>,
// watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>>,
// ) {
// // services and files check (once)
// let files_check = file_handler(
// &prc.name,
// &prc.dependencies.files,
// tx.clone(),
// watchers.clone(),
// );
// let services_check = service_handler(&prc.name, &prc.dependencies.services, tx.clone());
// let res = join!(files_check, services_check);
// // if inactive -> spawn checks -> active is true
// if !is_active(&prc.name).await && res.0.is_ok() && res.1.is_ok() {
// if start_process(&prc.name, &prc.path).await.is_err() {
// tx.send(3).await.unwrap();
// return;
// }
// }
// // if frozen -> spawn checks -> unfreeze is true
// else if is_frozen(&prc.name).await && res.0.is_ok() && res.1.is_ok() {
// tx.send(10).await.unwrap();
// return;
// }
// // tokio::time::sleep(Duration::from_millis(100)).await;
// tokio::task::yield_now().await;
// }
// todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}' // todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}'
/// # Fn `get_container_id` /// # Fn `get_container_id`
/// ## for getting container id used in logs /// ## for getting container id used in logs
/// ///
/// *input* : - /// *input* : -
/// ///
/// *output* : Some(String) if cont-id was grubbed | None - if not /// *output* : Some(String) if cont-id was grubbed | None - if not
@ -426,9 +396,9 @@ async fn process_protocol_symbol(proc: Arc<TrackingProcess>, val: u8) -> Result<
/// *managing* : - /// *managing* : -
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub fn get_container_id() -> Option<String> { pub fn get_container_id() -> Option<String> {
match Command::new(GET_ID_CMD).output() { match Command::new(*GET_ID_CMD).output() {
Ok(output) => { Ok(output) => {
if !output.status.success() { if !output.status.success() {
return None; return None;
@ -437,7 +407,7 @@ pub fn get_container_id() -> Option<String> {
if id.is_empty() { if id.is_empty() {
return None; return None;
} }
Some(String::from_utf8_lossy(&output.stdout).to_string()) Some(String::from_utf8_lossy(&output.stdout).trim().to_string())
} }
Err(_) => None, Err(_) => None,
} }

97
noxis-rs/src/utils/bus.rs Normal file
View File

@ -0,0 +1,97 @@
use std::sync::Arc;
use crate::options::structs::bus::{BusMessage, BusMessageDirection};
use crate::options::structs::ProcessUnit;
use log::{error, trace};
use tokio::sync::mpsc::{Receiver, Sender};
type Inner = Receiver<BusMessage>;
type Outter = Arc<Sender<BusMessage>>;
/// Routing table of the bus: one outbound sender per message destination.
#[derive(Debug)]
pub struct Highway {
    /// Outbound channel handle toward the CLI unit.
    to_cli: Outter,
    /// Outbound channel handle toward the supervisor unit.
    to_supervisor: Outter,
    /// Outbound channel handle toward the metrics unit.
    to_metrics: Outter,
}
impl Highway {
    /// Bundles the three outbound channel handles into a single router.
    fn new(to_cli: Outter, to_supervisor: Outter, to_metrics: Outter) -> Self {
        Self {
            to_cli,
            to_supervisor,
            to_metrics,
        }
    }

    /// Forwards `msg` to the channel matching the direction embedded in the
    /// message itself.
    ///
    /// Returns an error when the target channel is closed (receiver dropped).
    async fn send(&self, msg: BusMessage) -> anyhow::Result<()> {
        // Both message variants carry the direction as their first field.
        let dir = match &msg {
            BusMessage::Request(dir, ..) | BusMessage::Response(dir, ..) => {
                trace!("redirecting message to {:?} ...", dir);
                dir
            }
        };
        // Previously each direction had its own near-identical `send_*`
        // helper; resolving the target sender once removes the triplication.
        let target = match dir {
            BusMessageDirection::ToCli => &self.to_cli,
            BusMessageDirection::ToSupervisor => &self.to_supervisor,
            BusMessageDirection::ToMetrics => &self.to_metrics,
        };
        target.send(msg).await?;
        Ok(())
    }
}
/// Central message router: owns the single inbound receiver that every unit
/// writes to, plus the `Highway` of outbound senders it redistributes on.
pub struct Bus {
    /// Inbound end of the shared bus channel.
    inner: Inner,
    /// Per-destination outbound senders.
    highway: Highway,
}
impl Bus {
pub fn new(inner: Inner, to_cli: Outter, to_supervisor: Outter, to_metrics: Outter) -> Self {
Self {
inner,
highway: Highway::new(to_cli, to_supervisor, to_metrics),
}
}
}
#[async_trait::async_trait]
impl ProcessUnit for Bus {
    /// Pumps the bus: waits for each inbound message and redirects it onto
    /// the highway channel matching its embedded direction.
    ///
    /// `recv().await` parks the task until a message arrives, replacing the
    /// previous `try_recv` + 20 ms sleep busy-poll; it also lets the loop end
    /// cleanly (the fn returns) once every sender has been dropped, instead
    /// of spinning forever on a closed channel.
    async fn process(&mut self) {
        while let Some(msg) = self.inner.recv().await {
            // Log by reference — no need to destructure and rebuild the
            // message just to inspect its direction and content type.
            match &msg {
                BusMessage::Request(direction, content_type, _) => trace!(
                    "bus has got a new Request with direction {:?} and type {:?}",
                    direction,
                    content_type
                ),
                BusMessage::Response(direction, content_type, _) => trace!(
                    "bus has got a new Response with direction {:?} and type {:?}",
                    direction,
                    content_type
                ),
            }
            if let Err(er) = self.highway.send(msg).await {
                error!("Cannot redirect message : {}", er);
            }
        }
    }
}

View File

@ -1,321 +1,293 @@
use crate::options::structs::{CustomError, Files}; use crate::options::structs::CustomError;
use super::prcs::{is_active, is_frozen}; use crate::options::structs::Events;
use inotify::{EventMask, Inotify, WatchMask}; use async_trait::async_trait;
use std::borrow::BorrowMut; use inotify::{EventMask, Inotify, WatchMask};
use std::path::Path; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::mpsc; use tokio::sync::mpsc::Sender;
use tokio::sync::mpsc::Sender as Sender;
use tokio::time::Duration;
use crate::options::structs::Events;
use async_trait::async_trait;
pub mod v2 { pub mod v2 {
use log::{error, info, warn}; use super::*;
use crate::options::structs::{FileTriggerType, FileTriggersForController as Triggers, ProcessUnit}; use crate::options::structs::{
use super::*; FileTriggerType, FileTriggersForController as Triggers, ProcessUnit,
use std::{collections::HashMap, path::Path}; };
use log::{error, info, warn};
use serde::Serialize;
use std::{collections::HashMap, path::Path};
type MpscSender = Arc<Sender<Events>>; type MpscSender = Arc<Sender<Events>>;
type EventHandlers = HashMap<Arc<str>, (Triggers, MpscSender)>; type EventHandlers = HashMap<Arc<str>, (Triggers, MpscSender)>;
#[derive(Debug)] #[derive(Debug, Serialize, Clone, Copy)]
enum FileState { pub enum FileState {
Ok, Ok,
NotFound, NotFound,
}
#[derive(Debug)]
pub struct FilesController {
name: Arc<str>,
path: String,
code_name: Arc<str>,
backup_file : String,
state: FileState,
watcher: Option<Inotify>,
triggers: EventHandlers,
}
impl PartialEq for FilesController {
fn eq(&self, other: &Self) -> bool {
self.code_name == other.code_name
} }
}
#[derive(Debug)] impl FilesController {
pub struct FilesController { #[inline(always)]
name : Arc<str>, pub fn new(name: &str, triggers: EventHandlers) -> FilesController {
path : String, let name: Arc<str> = Arc::from(name);
code_name : Arc<str>, Self {
state : FileState, name: name.clone(),
watcher : Option<Inotify>, path: String::new(),
triggers : EventHandlers, state: FileState::Ok,
} watcher: None,
triggers,
impl PartialEq for FilesController { code_name: name.clone(),
fn eq(&self, other: &Self) -> bool { backup_file: String::new(),
self.code_name == other.code_name
} }
} }
#[inline(always)]
impl FilesController { pub fn with_path(mut self, path: impl AsRef<Path>, backup : String) -> anyhow::Result<FilesController> {
pub fn new(name: &str, triggers: EventHandlers) -> FilesController { self.path = path.as_ref().to_string_lossy().into_owned();
let name: Arc<str> = Arc::from(name); self.watcher = {
Self { match create_watcher(&self.name, &self.path) {
name : name.clone(), Ok(val) => Some(val),
path : String::new(), Err(er) => {
state : FileState::Ok, error!(
watcher : None, "Cannot create watcher for {} ({}) due to {}",
triggers, self.name, &self.path, er
code_name : name.clone(), );
return Err(er);
}
} }
};
self.code_name = Arc::from(format!("{}{}", &self.path, &self.code_name));
self.backup_file = backup;
match create_backup(&self.code_name, &self.backup_file) {
Ok(_) => info!("Backup file for {} was created ({})", &self.code_name, &self.backup_file),
Err(er) => warn!("{}. Ignoring ...", er),
} }
pub fn with_path(mut self, path: impl AsRef<Path>) -> anyhow::Result<FilesController> { Ok(self)
self.path = path.as_ref().to_string_lossy().into_owned(); }
self.watcher = { pub fn add_event(&mut self, file_controller: FilesController) {
match create_watcher(&self.name, &self.path) { for (k, v) in file_controller.triggers {
Ok(val) => Some(val), self.triggers.entry(k).or_insert(v);
Err(er) => { }
error!("Cannot create watcher for {} ({}) due to {}", self.name, &self.path, er); }
return Err(er) async fn trigger_on(&mut self, trigger_type: Option<FileTriggerType>) {
} for (prc_name, (triggers, channel)) in &self.triggers {
let msg = match &trigger_type {
None => Events::Positive(self.code_name.clone()),
Some(event) => {
info!(
"Event on file {} ({}) : {}. Notifying `{}` ...",
&self.name, &self.path, event, &prc_name
);
event.event_from_file_trigger_controller(self.code_name.clone(), &triggers)
} }
}; };
self.code_name = Arc::from(format!("{}{}", &self.path, &self.code_name)); let _ = channel.send(msg).await;
Ok(self)
}
pub fn add_event(&mut self, file_controller : FilesController) {
for (k, v) in file_controller.triggers {
self.triggers.entry(k).or_insert(v);
}
}
async fn trigger_on(&mut self, trigger_type: Option<FileTriggerType>) {
for (prc_name, (triggers, channel)) in &self.triggers {
let msg = match &trigger_type {
None => {
Events::Positive(self.code_name.clone())
},
Some(event) => {
info!("Event on file {} ({}) : {}. Notifying `{}` ...", &self.name, &self.path, event, &prc_name);
event.event_from_file_trigger_controller(self.code_name.clone(), &triggers)
},
};
let _ = channel.send(msg).await;
}
} }
} }
#[async_trait] pub fn get_state(&self) -> FileState {
impl ProcessUnit for FilesController { self.state
async fn process(&mut self) { }
// polling file check pub fn get_code_name(&self) -> Arc<str> {
// 1) existing check self.code_name.clone()
// dbg!(&self); }
if let Ok(_) = check_file(&self.name, &self.path).await { pub fn get_backup_file(&self) -> String {
if let FileState::NotFound = self.state { self.backup_file.to_string()
info!("File {} ({}) was found in determined scope. Notifying ...", self.name, self.code_name); }
self.state = FileState::Ok; }
// reseting negative outcome in prc #[async_trait]
self.trigger_on(None).await; impl ProcessUnit for FilesController {
} async fn process(&mut self) {
match &mut self.watcher { if let Ok(_) = check_file(&self.name, &self.path).await {
Some(notify) => { if let FileState::NotFound = self.state {
let mut buffer = [0; 1024]; info!(
if let Ok(mut notif_events) = notify.read_events(&mut buffer) { "File {} ({}) was found in determined scope. Notifying ...",
// notif_events.into_iter().for_each(|mask| {dbg!(&mask.mask);}); self.name, self.code_name
// todo!(); );
if let (recreate_watcher, true) = ( self.state = FileState::Ok;
notif_events.any(|mask| mask.mask == EventMask::DELETE_SELF), self.trigger_on(None).await;
notif_events.any(|mask| mask.mask == EventMask::MODIFY) }
) { match &mut self.watcher {
Some(notify) => {
let mut buffer = [0; 128];
if let Ok(notif_events) = notify.read_events(&mut buffer) {
let (need_to_recreate, was_modifired) =
notif_events.fold((false, false), |(a, b), mask| {
(
a || mask.mask == EventMask::DELETE_SELF,
b || mask.mask == EventMask::MODIFY,
)
}
);
if self.backup_file.is_empty() {
} else {
}
if let (mut recreate_watcher, true) = (need_to_recreate, was_modifired) {
if self.backup_file.is_empty() {
warn!("File {} ({}) was changed", self.name, &self.path); warn!("File {} ({}) was changed", self.name, &self.path);
if recreate_watcher { self.trigger_on(Some(FileTriggerType::OnChange)).await;
self.watcher = match create_watcher(&self.name, &self.path) { } else {
Ok(notifier) => Some(notifier), recreate_watcher = true;
Err(er) => { match restore_file(&self.code_name, &self.backup_file).await {
error!("Failed to recreate watcher for {} ({}) due to {}", Ok(_) => info!("File {} was successfully restored", &self.code_name),
self.name, Err(er) => error!("Cannot restore file {} : {}", &self.code_name, er),
&self.path, }
er }
); if recreate_watcher {
None self.watcher = match create_watcher(&self.name, &self.path) {
}, Ok(notifier) => Some(notifier),
Err(er) => {
error!(
"Failed to recreate watcher for {} ({}) due to {}",
self.name, &self.path, er
);
None
} }
} }
self.trigger_on(Some(FileTriggerType::OnChange)).await;
return;
} }
} }
}, }
None => { /* DEAD END */},
} }
} else { None => return,
if let FileState::Ok = self.state { }
warn!("File {} ({}) was not found in determined scope", self.name, &self.path); } else {
if let FileState::Ok = self.state {
if self.backup_file.is_empty() {
warn!(
"File {} ({}) was not found in determined scope",
self.name, &self.path
);
self.state = FileState::NotFound; self.state = FileState::NotFound;
self.trigger_on(Some(FileTriggerType::OnDelete)).await; self.trigger_on(Some(FileTriggerType::OnDelete)).await;
} } else {
return; warn!(
} "File {} ({}) was not found in determined scope. Restoring from backup-file ...",
self.trigger_on(None).await; self.name, &self.path
// 2) change check );
} match restore_file(&self.code_name, &self.backup_file).await {
} Err(er) => error!("Cannot restore file {} : {}", &self.code_name, er),
} Ok(_) => {
info!("File {} was successfully restored", &self.code_name);
/// # Fn `create_watcher` self.watcher = match create_watcher(&self.name, &self.path) {
/// ## for creating watcher on file's delete | update events Ok(notifier) => Some(notifier),
/// Err(er) => {
/// *input* : `&str`, `&str` error!(
/// "Failed to recreate watcher for {} ({}) : {}",
/// *output* : `Err` if it cant create file watcher | `Ok(watcher)` on successfull construction self.name, &self.path, er
/// );
/// *initiator* : fn `file_handler`, fn `utils::run_daemons` None
/// }
/// *managing* : current file's name: &str, path in local storage to current file: &str
///
/// *depends on* : -
///
pub fn create_watcher(filename: &str, path: &str) -> anyhow::Result<Inotify> {
let src = format!("{}{}", path, filename);
let inotify: Inotify = Inotify::init()?;
inotify.watches().add(&src, WatchMask::ALL_EVENTS)?;
Ok(inotify)
}
/// # Fn `create_watcher`
/// ## for managing processes by checking dep files' states
///
/// *input* : `&str`, `&[Files]`, `Arc<mpsc::Sender<u8>>`, `Arc<tokio::sync::Mutex<Vec<Inotify>>>`
///
/// *output* : `Err` if something with dep file is wrong | `Ok(())` on successfull dep file check
///
/// *initiator* : fn `utils::running_handler`
///
/// *managing* : current process's name: &str, list of dep files : `&[Files]`, atomic ref counter on sender main channel for current process `Arc<mpsc::Sender<u8>>`, mut list of file watchers`Arc<tokio::sync::Mutex<Vec<Inotify>>>`
///
/// *depends on* : Files
///
pub async fn file_handler(
name: &str,
files: &[Files],
tx: Arc<mpsc::Sender<u8>>,
watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>>,
) -> anyhow::Result<()> {
for (i, file) in files.iter().enumerate() {
// let src = format!("{}{}", file.src, file.filename);
if check_file(&file.filename, &file.src).await.is_err() {
if !is_active(name).await || is_frozen(name).await {
return Err(anyhow::Error::msg("Process is frozen or stopped"));
}
match file.triggers.on_delete.as_str() {
"stay" => {
tx.send(9).await.unwrap();
continue;
}
"stop" => {
if is_active(name).await {
tx.send(1).await.unwrap();
}
return Err(anyhow::Error::msg("Process was stopped"));
}
"hold" => {
if is_active(name).await {
tx.send(2).await.unwrap();
return Err(anyhow::Error::msg("Process was frozen"));
}
}
_ => {
tokio::time::sleep(Duration::from_millis(50)).await;
tx.send(101).await.unwrap();
return Err(anyhow::Error::msg("Impermissible character or word in file trigger"));
}
}
} else if is_active(name).await && !is_frozen(name).await {
let watchers = watchers.clone();
// println!("mutex: {:?}", watchers);
let mut buffer = [0; 128];
let mut mutex_guard = watchers.lock().await;
if let Some(notify) = mutex_guard.get_mut(i) {
let events = notify.read_events(&mut buffer);
// println!("{:?}", events);
if events.is_ok() {
let events: Vec<EventMask> = events
.unwrap()
.map(|mask| mask.mask)
.filter(|mask| {
*mask == EventMask::MODIFY || *mask == EventMask::DELETE_SELF
})
.collect();
for event in events {
if let EventMask::DELETE_SELF = event {
// ! warning (DELETE_SELF event) !
// println!("! warning (DELETE_SELF event) !");
// * watcher recreation after dealing with file recreation mechanism in text editors
let mutex = notify.borrow_mut();
// *mutex = create_watcher(&file.filename, &file.src).await.unwrap();
if let Ok(watcher) = create_watcher(&file.filename, &file.src) {
*mutex = watcher;
} }
} },
match file.triggers.on_change.as_str() {
"stop" => {
let _ = tx.send(7).await;
}
"restart" => {
let _ = tx.send(8).await;
}
"stay" => {
let _ = tx.send(9).await;
}
_ => {
let _ = tx.send(101).await;
}
}
} }
} }
} }
return;
} }
self.trigger_on(None).await;
} }
tokio::task::yield_now().await;
Ok(())
} }
}
/// # Fn `check_file` pub fn create_backup(target: &str, backup: &str) -> anyhow::Result<u64> {
/// ## for checking existance of current file return if !backup.is_empty() {
/// Ok(std::fs::copy(target, backup)?)
/// *input* : `&str`, `&str` } else {
/// Err(anyhow::Error::msg(format!("No need to create backup-file for {}", target)))
/// *output* : `Ok(())` if file exists | `Err(_)` if not | panic on fs error
///
/// *initiator* : fn `file_handler`
///
/// *managing* : current file's name: `&str` and current file's path in local storage: `&str`
///
/// *depends on* : network activity
///
pub async fn check_file(filename: &str, path: &str) -> Result<(), CustomError> {
let arc_name = Arc::new(filename.to_string());
let arc_path = Arc::new(path.to_string());
tokio::task::spawn_blocking(move || {
let file_concat = format!("{}{}", arc_path, arc_name);
let path = Path::new(&file_concat);
if path.exists() {
Ok(())
} else {
Err(CustomError::Fatal)
}
})
.await
.unwrap_or_else(|_| {
panic!("Corrupted while file check process");
})
} }
}
#[cfg(test)] pub async fn restore_file(target: &str, backup: &str) -> anyhow::Result<u64> {
mod files_unittests { Ok(tokio::fs::copy(backup, target).await?)
use super::*; }
#[tokio::test]
async fn try_to_create_watcher() { /// # Fn `create_watcher`
let res = create_watcher("dep-file", "./tests/examples/"); /// ## for creating watcher on file's delete | update events
assert!(res.is_ok()); ///
} /// *input* : `&str`, `&str`
#[tokio::test] ///
async fn try_to_create_invalid_watcher() { /// *output* : `Err` if it cant create file watcher | `Ok(watcher)` on successfull construction
let res = create_watcher("invalid-file", "/path/to/the/no/dir"); ///
assert!(res.is_err()); /// *initiator* : fn `file_handler`, fn `utils::run_daemons`
} ///
#[tokio::test] /// *managing* : current file's name: &str, path in local storage to current file: &str
async fn check_existing_file() { ///
let res = check_file("dep-file", "./tests/examples/").await; /// *depends on* : -
assert!(res.is_ok()); ///
} pub fn create_watcher(filename: &str, path: &str) -> anyhow::Result<Inotify> {
#[tokio::test] let src = format!("{}{}", path, filename);
async fn check_non_existing_file() { let inotify: Inotify = Inotify::init()?;
let res = check_file("invalid-file", "/path/to/the/no/dir").await; inotify.watches().add(&src, WatchMask::ALL_EVENTS)?;
assert!(res.is_err()); Ok(inotify)
}
/// # Fn `check_file`
/// ## for checking existance of current file
///
/// *input* : `&str`, `&str`
///
/// *output* : `Ok(())` if file exists | `Err(_)` if not | panic on fs error
///
/// *initiator* : fn `file_handler`
///
/// *managing* : current file's name: `&str` and current file's path in local storage: `&str`
///
/// *depends on* : network activity
///
pub async fn check_file(filename: &str, path: &str) -> Result<(), CustomError> {
let arc_name = Arc::new(filename.to_string());
let arc_path = Arc::new(path.to_string());
tokio::task::spawn_blocking(move || {
let file_concat = format!("{}{}", arc_path, arc_name);
let path = Path::new(&file_concat);
if path.exists() {
Ok(())
} else {
Err(CustomError::Fatal)
} }
})
.await
.unwrap_or_else(|_| {
panic!("Corrupted while file check process");
})
}
#[cfg(test)]
mod files_unittests {
use super::*;
#[tokio::test]
async fn try_to_create_watcher() {
let res = create_watcher("dep-file", "./tests/examples/");
assert!(res.is_ok());
} }
#[tokio::test]
async fn try_to_create_invalid_watcher() {
let res = create_watcher("invalid-file", "/path/to/the/no/dir");
assert!(res.is_err());
}
#[tokio::test]
async fn check_existing_file() {
let res = check_file("dep-file", "./tests/examples/").await;
assert!(res.is_ok());
}
#[tokio::test]
async fn check_non_existing_file() {
let res = check_file("invalid-file", "/path/to/the/no/dir").await;
assert!(res.is_err());
}
}

View File

@ -1,15 +1,15 @@
// //
// module needed to check host-agent health condition and to communicate with it // module needed to check host-agent health condition and to communicate with it
// //
use anyhow::{Error, Ok, Result};
use tokio::{io::Interest, net::UnixStream}; use tokio::{io::Interest, net::UnixStream};
use anyhow::{Ok, Result, Error}; // to kill lint bug
// to kill lint bug
#[allow(unused_imports)] #[allow(unused_imports)]
use tokio::net::UnixListener; use tokio::net::UnixListener;
/// # Fn `open_unix_socket` /// # Fn `open_unix_socket`
/// ## opening unix-socket for host-agent communication /// ## opening unix-socket for host-agent communication
/// ///
/// *input* : - /// *input* : -
/// ///
/// *output* : `Ok(socket)` if socket was successfully opened | `Err(er)` if not /// *output* : `Ok(socket)` if socket was successfully opened | `Err(er)` if not
@ -19,7 +19,7 @@ use tokio::net::UnixListener;
/// *managing* : - /// *managing* : -
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)] #[allow(dead_code)]
async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error> { async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error> {
// "/var/run/enode/hostagent.sock" // "/var/run/enode/hostagent.sock"
@ -27,18 +27,18 @@ async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error>
} }
/// # Fn `ha_healthcheck` /// # Fn `ha_healthcheck`
/// ## for checking host-agent state /// ## for checking host-agent state
/// ///
/// *input* : `&UnixStream` /// *input* : `&UnixStream`
/// ///
/// *output* : `Ok(()))` if host-agent is running | `Err(er)` if not /// *output* : `Ok(()))` if host-agent is running | `Err(er)` if not
/// ///
/// *initiator* : main thread `(??)` /// *initiator* : main thread `(??)`
/// ///
/// *managing* : ref on unix-socket object /// *managing* : ref on unix-socket object
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)] #[allow(dead_code)]
async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> { async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
socket.ready(Interest::WRITABLE).await?; socket.ready(Interest::WRITABLE).await?;
@ -48,8 +48,8 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
} }
/// # Fn `ha_healthcheck` /// # Fn `ha_healthcheck`
/// ## for sending data to host-agent using unix-socket /// ## for sending data to host-agent using unix-socket
/// ///
/// *input* : `&UnixStream`, `&str` /// *input* : `&UnixStream`, `&str`
/// ///
/// *output* : `Ok(()))` if data was sent| `Err(er)` if not /// *output* : `Ok(()))` if data was sent| `Err(er)` if not
@ -59,9 +59,9 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
/// *managing* : socket: `&UnixStream`, data: `&str` /// *managing* : socket: `&UnixStream`, data: `&str`
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)] #[allow(dead_code)]
async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error > { async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error> {
socket.ready(Interest::WRITABLE).await?; socket.ready(Interest::WRITABLE).await?;
socket.writable().await?; socket.writable().await?;
socket.try_write(data.as_bytes())?; socket.try_write(data.as_bytes())?;
@ -91,8 +91,8 @@ mod hagent_unittets {
// --Result<maybe Response> // --Result<maybe Response>
// one-shot func // one-shot func
async fn hagent_communication_test() { async fn hagent_communication_test() {
use crate::options::structs::{ProcessMetrics, ContainerMetrics, Metrics}; use crate::options::structs::{ContainerMetrics, Metrics, ProcessMetrics};
let procm = ProcessMetrics::new("test-prc", 15.0, 5.0); let procm = ProcessMetrics::new("test-prc", 15.0, 5.0);
let contm = ContainerMetrics::new("test", 32.0, 12.0, vec![procm.process_name.clone()]); let contm = ContainerMetrics::new("test", 32.0, 12.0, vec![procm.process_name.clone()]);
let metrics = Metrics::new(contm, vec![procm]); let metrics = Metrics::new(contm, vec![procm]);
@ -105,10 +105,11 @@ mod hagent_unittets {
let sock = sock.unwrap(); let sock = sock.unwrap();
assert!(ha_healthcheck(&sock).await.is_ok()); assert!(ha_healthcheck(&sock).await.is_ok());
assert!(ha_send_data(&sock, &metrics).await.is_ok()); assert!(ha_send_data(&sock, &metrics).await.is_ok());
} }
#[tokio::test] #[tokio::test]
async fn open_unixsocket_test() { async fn open_unixsocket_test() {
assert!(open_unix_socket("non/valid/socket/file.sock").await.is_err()); assert!(open_unix_socket("non/valid/socket/file.sock")
.await
.is_err());
} }
} }

View File

@ -1,230 +1,571 @@
// submodule needed to get metrics such as ///! Submodule needed to get metrics such as
// cpu load, ram/rom load and net activity ///! cpu load, ram/rom load and net activity
// use std::sync::Mutex; use crate::{
use std::sync::Arc; options::structs::ProcessState,
use crate::options::structs::TrackingProcess; utils::metrics::processes::{ProcessesGeneral, ProcessesQuery},
use sysinfo::{Process, System}; };
use tokio::join; use log::warn;
use crate::options::structs::{ProcessMetrics, ContainerMetrics}; use noxis_cli::metrics_models::MetricsMode;
use super::get_container_id; use std::{any::Any, collections::BTreeMap, sync::Arc};
// use pcap::{Device, Capture, Active}; // use chrono::Duration;
// use std::net::Ipv4Addr; use super::prcs::v2::Pid;
// use anyhow::{Result, Ok}; use crate::options::structs::bus::{BusMessage, BusMessageContentType, BusMessageDirection};
use serde::Serialize;
use std::fmt::Debug;
use sysinfo::{Disks as DisksList, Networks, System};
// use noxis_cli::metrics_models::MetricsMode;
// type PacketBuffer = Arc<Mutex<Vec<PacketInfo>>>; pub type MetricProcesses = Vec<ProcessExtended>;
type CoreUsage = BTreeMap<usize, CoreInfo>;
type Disks = Vec<Disk>;
type Ifaces = Vec<Network>;
type BusReciever = tokio::sync::mpsc::Receiver<BusMessage>;
type BusSender = Arc<tokio::sync::mpsc::Sender<BusMessage>>;
/// # Fn `init_metrics_grubber` /// # Fn `init_metrics_grubber`
/// ## for initializing process of unstoppable grubbing metrics. /// ## for initializing process of unstoppable grubbing metrics.
/// ///
/// *input* : `Arc<Mutex<UnixSocket>>` ?? /// *input* : `Arc<Mutex<UnixSocket>>` ??
/// ///
/// *output* : `Err` if it cant create grubbers | `Ok` on finish /// *output* : `Err` if it cant create grubbers | `Ok` on finish
/// ///
/// *initiator* : main thread ?? /// *initiator* : main thread ??
/// ///
/// *managing* : object of unix-socket reader /// *managing* : object of unix-socket reader
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)] pub async fn init_metrics_grubber(
pub async fn init_metrics_grubber() { /* BROADCSAT LISTENER TO GET `PROCESSES` OBJ */
bus_sender: BusSender,
bus_reciever: BusReciever,
) -> anyhow::Result<()> {
let mut system = System::new(); let mut system = System::new();
// let mut buffer: Vec<PacketInfo> = vec![]; let mut disks = DisksList::new_with_refreshed_list();
// let shared_buf: PacketBuffer = Arc::new(Mutex::new(buffer)); let mut networks = Networks::new_with_refreshed_list();
// get_all_metrics(&mut system).await;
/* TODO */
let mut bus_reciever = bus_reciever;
loop {
let msg = bus_reciever.try_recv();
if let Ok(BusMessage::Request(_, _, cont)) = msg {
system.refresh_all();
disks.refresh_list();
networks.refresh_list();
let cont: Box<dyn Any + Send> = cont;
match cont.downcast::<MetricsMode>() {
Err(_) => {
warn!("Unrecognized Metric mode was given");
let _ = bus_sender
.send(BusMessage::Response(
BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::new(Err(anyhow::Error::msg(format!(
"Unrecognized Metric mode was given"
)))),
))
.await;
}
Ok(mode) => {
tokio::time::sleep(tokio::time::Duration::from_millis(200)).await;
let metric: Box<dyn MetricsExportable> = match *mode {
MetricsMode::Full => {
let mut refs =
get_all_metrics(&mut system, bus_sender.clone(), &disks, &networks)
.await;
if let Some(prcs) = bus_reciever.recv().await {
if let BusMessage::Response(_, _, cont) = prcs {
let cont: Box<dyn Any> = cont;
if let Ok(cont) = cont.downcast::<ProcessesQuery>() {
if let ProcessesQuery::General(info) = *cont {
refs.processes = info;
}
}
}
}
Box::new(refs)
}
MetricsMode::Host => {
Box::new(get_global_host_info(&mut system, &disks, &networks).await)
}
MetricsMode::Cpu => Box::new(get_cpu_metrics(&mut system).await),
MetricsMode::Ram => Box::new(get_ram_metrics(&mut system).await),
MetricsMode::Rom => Box::new(get_all_disks_metrics(&disks).await),
MetricsMode::Network => Box::new(get_all_ifaces_metrics(&networks).await),
// inspect processes
MetricsMode::Processes => {
todo!();
}
};
// let metric: Box<dyn BusContent> = Box::new(metric);
let metric = metric.serialze_into_output();
system.refresh_all(); let _ = bus_sender
// let temp = String::from_utf8(get_pid("systemd").await.unwrap().stdout).unwrap(); .send(BusMessage::Response(
// let prc = system.process(Pid::from_str(&temp).unwrap()).unwrap(); BusMessageDirection::ToCli,
// prc. BusMessageContentType::MetricsObj,
// let _ = capture_packets(shared_buf.clone()).await; Box::new(metric),
))
.await;
}
}
} else if let Ok(BusMessage::Response(_, _, cont)) = msg {
let cont: Box<dyn Any + Send> = cont;
if let Ok(info) = cont.downcast::<ProcessesQuery>() {
if let ProcessesQuery::All(info) = *info {
let procs: Vec<_> = info
.into_iter()
.map(|prc| ProcessExtended::from_process_query_all(&mut system, prc))
.collect();
let _ = bus_sender
.send(BusMessage::Response(
BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::<anyhow::Result<String>>::new(Ok(serde_json::to_string_pretty(
&procs,
)?)),
))
.await;
} else {
let _ = bus_sender
.send(BusMessage::Response(
BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::new(Err(anyhow::Error::msg(format!(
"Unknown type was send by the Supervisor"
)))),
))
.await;
}
} else {
let _ = bus_sender
.send(BusMessage::Response(
BusMessageDirection::ToCli,
BusMessageContentType::Result,
Box::new(Err(anyhow::Error::msg(format!(
"Unknown type was send by the Supervisor"
)))),
))
.await;
}
}
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
}
} }
#[allow(dead_code)] async fn get_all_metrics(
#[allow(unused_variables)] system: &mut System,
async fn gather_metrics(proc: Arc<Process>) { sender: BusSender,
disks: &DisksList,
networks: &Networks,
) -> FullMetrics {
let host = get_host_info().await;
let cpu = get_cpu_metrics(system).await;
let ram = get_ram_metrics(system).await;
let disks = get_all_disks_metrics(&disks).await;
let ifaces = get_all_ifaces_metrics(&networks).await;
let prcs: Vec<ProcessesGeneral> = Vec::new();
let _ = sender
.send(BusMessage::Request(
BusMessageDirection::ToSupervisor,
BusMessageContentType::ProcessQuery,
Box::new(ProcessesQuery::QueryGeneral),
))
.await;
FullMetrics::create(host, cpu, ram, disks, ifaces, prcs)
} }
// DEPRECATED : for net monitoring async fn get_global_host_info(
// async fn capture_packets(buffer: PacketBuffer) -> Result<()> { system: &mut System,
// let mut cap = Capture::from_device(Device::lookup()?.unwrap())? disks: &DisksList,
// .promisc(true) networks: &Networks,
// .open()?; ) -> HostGeneral {
HostGeneral {
// cap.filter("not broadcast and not multicast", true)?; hostname: System::host_name().unwrap_or_default(),
os: System::long_os_version().unwrap_or_default(),
// while let core::result::Result::Ok(packet) = cap.next_packet() { kernel: System::kernel_version().unwrap_or_default(),
// if let Some((src, dst, prot)) = get_packet_info(&packet.data).await { cpu_percentage: system.global_cpu_usage(),
// let packet_info = PacketInfo::new(String::from(prot), dst, src, packet.header.len as usize); ram_available: system.total_memory() - system.free_memory(),
// let mut locked_buffer = buffer.lock().unwrap(); disk_percentage: {
// println!("{:?}", &packet_info); let total = disks
// locked_buffer.push(packet_info); .iter()
// } .map(|disk| disk.available_space() * 100 / disk.total_space())
// } .collect::<Vec<u64>>();
// Ok(()) total.iter().sum::<u64>() / (total.len() as u64)
// } },
// async fn get_packet_info(data: &[u8]) -> Option<(Ipv4Addr, Ipv4Addr, &str)> { net_stat: {
// if data.len() >= 20 { let total = networks
// let src_ip = Ipv4Addr::new(data[12], data[13], data[14], data[15]); .iter()
// let dst_ip = Ipv4Addr::new(data[16], data[17], data[18], data[19]); .map(|(_, iface_data)| iface_data.received() + iface_data.transmitted())
// let protocol = match data[9] { .collect::<Vec<u64>>();
// 1 => "ICMP", total.iter().sum::<u64>() / ((total.len() * 2) as u64)
// 6 => "TCP", },
// 17 => "UDP", }
// _ => "Unknown",
// };
// Some((src_ip, dst_ip, protocol))
// } else {
// None
// }
// }
/// # Fn `get_all_container_metrics`
/// ## for gathering all container (whole system metrics)
///
/// *input* : `Arc<System>`, `Arc<Vec<TrackingProcess>>`
///
/// *output* : `ContainerMetrics`
///
/// *initiator* : main thread ??
///
/// *managing* : ref counter to `System` object, ref counter to list of processes
///
/// *depends on* : `TrackingProcess`
///
#[allow(dead_code)]
async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProcess>>) -> ContainerMetrics {
let metrics = join!(
get_cpu_metrics_container(sys.clone()),
get_ram_metrics_container(sys.clone()),
get_subsystems(prcs.clone())
);
ContainerMetrics::new(
&get_container_id().unwrap_or(String::from("unknown")),
metrics.0,
metrics.1,
metrics.2
)
} }
/// # Fn `get_cpu_metrics_container` async fn get_host_info() -> HostInfo {
/// ## for gathering container cpu metrics HostInfo {
/// hostname: System::host_name().unwrap_or_default(),
/// *input* : `Arc<System>` os: System::long_os_version().unwrap_or_default(),
/// kernel: System::kernel_version().unwrap_or_default(),
/// *output* : `f32` }
///
/// *initiator* : main thread ??
///
/// *managing* : ref counter to `System` object
///
/// *depends on* : -
///
#[allow(dead_code)]
async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 {
sys.global_cpu_usage()
} }
/// # Fn `get_ram_metrics_container` async fn get_cpu_metrics(system: &mut System) -> Cpu {
/// ## for gathering container ram metrics let mut buffer = CoreUsage::new();
/// let global_usage = system.global_cpu_usage();
/// *input* : `Arc<System>`
///
/// *output* : `f32`
///
/// *initiator* : main thread ??
///
/// *managing* : ref counter to `System` object
///
/// *depends on* : -
///
#[allow(dead_code)]
async fn get_ram_metrics_container(sys: Arc<System>) -> f32 {
(sys.used_memory() / sys.total_memory()) as f32 * 100.0
}
// async fn get_mem_metrics_container(sys: Arc<System>) -> f32 {
// sys.
// }
/// # Fn `get_subsystems` system.cpus().iter().enumerate().for_each(|(id, cpu)| {
/// ## for gathering info about container subsystems (processes) let core_info = CoreInfo {
/// // id,
/// *input* : `Arc<Vec<TrackingProcess>>` brand: cpu.brand().to_string(),
/// name: cpu.name().to_string(),
/// *output* : `Vec<String>` frequency: cpu.frequency(),
/// vendor_id: cpu.vendor_id().to_string(),
/// *initiator* : main thread ?? usage: cpu.cpu_usage(),
/// };
/// *managing* : ref counter to list of `TrackingProcess` buffer.entry(id).or_insert(core_info);
/// });
/// *depends on* : `TrackingProcess`
/// Cpu {
#[allow(dead_code)] global_usage,
async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> { usage: buffer,
prcs.iter().map(|process| process.name.clone()).collect() }
} }
/// # Fn `get_all_metrics_process` async fn get_ram_metrics(system: &mut System) -> Ram {
/// ## for gathering all process' metrics Ram {
/// free_mem: system.free_memory(),
/// *input* : `Arc<Process>`, `Arc<System>` free_swap: system.free_swap(),
/// total_mem: system.total_memory(),
/// *output* : `ProcessMetrics` total_swap: system.total_swap(),
/// }
/// *initiator* : main thread ??
///
/// *managing* : two ref counters to `Process` and `System`
///
/// *depends on* : -
///
#[allow(dead_code)]
async fn get_all_metrics_process(proc: Arc<Process>, sys: Arc<System>) -> ProcessMetrics {
let metrics = join!(
get_cpu_metrics_process(proc.clone()),
get_ram_metrics_process(proc.clone(), sys.clone())
);
ProcessMetrics::new(
proc.name().to_str().unwrap_or("unknown"),
metrics.0,
metrics.1
)
} }
/// # Fn `get_cpu_metrics_process` async fn get_all_disks_metrics(disks: &DisksList) -> Disks {
/// ## for gathering process cpu metrics // let disks = DisksList::new_with_refreshed_list();
/// let mut buffer = Disks::new();
/// *input* : `Arc<Process>` disks.list().iter().for_each(|disk| {
/// let disk = Disk {
/// *output* : `f32` name: disk.name().to_string_lossy().into_owned(),
/// kind: disk.kind().to_string(),
/// *initiator* : main thread ?? fs: disk.file_system().to_string_lossy().into_owned(),
/// mount_point: disk.mount_point().to_string_lossy().into_owned(),
/// *managing* : ref counter to `Process` object total_space: disk.total_space(),
/// available_space: disk.available_space(),
/// *depends on* : - is_removable: disk.is_removable(),
/// is_readonly: disk.is_read_only(),
async fn get_cpu_metrics_process(proc: Arc<Process>) -> f32 { };
proc.cpu_usage() buffer.push(disk);
});
buffer
} }
/// # Fn `get_ram_metrics_process` async fn get_all_ifaces_metrics(networks: &Networks) -> Ifaces {
/// ## for gathering process ram metrics let mut ifaces = Ifaces::new();
/// networks.iter().for_each(|(iface_name, data)| {
/// *input* : `Arc<Process>` let mac = data.mac_address().to_string();
/// let ip_addrs = data
/// *output* : `f32` .ip_networks()
/// .iter()
/// *initiator* : main thread ?? .map(|ipaddr| format!("{}/{}", ipaddr.addr, ipaddr.prefix))
/// .collect::<Vec<String>>();
/// *managing* : ref counter to `Process` object
/// let iface = Network {
/// *depends on* : - iname: iface_name.to_owned(),
/// mac: mac,
async fn get_ram_metrics_process(proc: Arc<Process>, sys: Arc<System>) -> f32 { ip_addresses: ip_addrs,
(proc.memory() as f64 / sys.total_memory() as f64) as f32 * 100.0 as f32 recieved: data.received(),
transmitted: data.transmitted(),
total_recieved_bytes: data.total_received(),
total_transmitted_bytes: data.total_transmitted(),
total_recieved_packets: data.total_packets_received(),
total_transmitted_packets: data.total_packets_transmitted(),
errors_on_recieved: data.errors_on_received(),
errors_on_transmitted: data.errors_on_transmitted(),
};
ifaces.push(iface);
});
ifaces
}
pub mod processes {
use crate::options::structs::ProcessState;
use crate::utils::prcs::v2::Pid;
#[derive(Debug, serde::Serialize)]
pub enum ProcessesQuery {
General(Vec<ProcessesGeneral>),
All(Vec<ProcessesAll>),
QueryGeneral,
QueryAll,
}
#[derive(Debug, serde::Serialize)]
pub struct ProcessesGeneral {
pub name: String,
pub state: ProcessState,
pub pid: Pid,
}
#[derive(Debug, serde::Serialize)]
pub struct ProcessesAll {
pub name: String,
pub state: ProcessState,
pub pid: Pid,
pub dependencies: deps::Dependencies,
}
pub mod deps {
use crate::options::structs::{FileTriggers, ServiceState, ServiceTriggers};
use crate::utils::files::v2::FileState;
// use super::*;
#[derive(Debug, serde::Serialize)]
pub struct FilesExtended {
pub name: String,
pub path: String,
pub status: FileState,
pub backup_file : String,
pub triggers: FileTriggers,
}
#[derive(Debug, serde::Serialize)]
pub struct ServicesExtended {
pub name: String,
pub access_name: String,
pub status: ServiceState,
pub triggers: ServiceTriggers,
}
#[derive(Debug, serde::Serialize)]
pub struct Dependencies {
pub files: Vec<FilesExtended>,
pub services: Vec<ServicesExtended>,
}
}
}
pub trait MetricsExportable: Send + Sync + 'static + Debug + Any {
fn serialze_into_output(&self) -> anyhow::Result<String>;
}
#[derive(Serialize, Debug)]
struct FullMetrics {
hostname: String,
os: String,
kernel: String,
cpu: Cpu,
ram: Ram,
disks: Disks,
networks: Ifaces,
pub processes: Vec<ProcessesGeneral>,
}
impl FullMetrics {
fn create(
host: HostInfo,
cpu: Cpu,
ram: Ram,
disks: Disks,
ifaces: Ifaces,
processes: Vec<ProcessesGeneral>,
) -> Self {
Self {
hostname: host.hostname,
os: host.os,
kernel: host.kernel,
cpu,
ram,
disks,
networks: ifaces,
processes,
}
}
}
impl MetricsExportable for FullMetrics {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
#[derive(Debug, Serialize)]
struct HostInfo {
hostname: String,
os: String,
kernel: String,
}
impl MetricsExportable for HostInfo {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
#[derive(Debug, Serialize)]
struct HostGeneral {
hostname: String,
os: String,
kernel: String,
cpu_percentage: f32,
ram_available: u64,
disk_percentage: u64,
net_stat: u64,
}
impl MetricsExportable for HostGeneral {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
#[derive(Serialize, Debug)]
struct Cpu {
global_usage: f32,
usage: CoreUsage,
}
impl MetricsExportable for Cpu {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
#[derive(Serialize, Debug)]
struct CoreInfo {
name: String,
brand: String,
frequency: u64,
vendor_id: String,
usage: f32,
}
#[derive(Serialize, Debug)]
struct Ram {
free_mem: u64,
free_swap: u64,
total_mem: u64,
total_swap: u64,
}
impl MetricsExportable for Ram {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
#[derive(Serialize, Debug)]
struct Disk {
name: String,
kind: String,
fs: String,
mount_point: String,
total_space: u64,
available_space: u64,
is_removable: bool,
is_readonly: bool,
}
impl MetricsExportable for Disks {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
// vec<Network>
#[derive(Serialize, Debug)]
struct Network {
iname: String,
mac: String,
ip_addresses: Vec<String>,
recieved: u64,
transmitted: u64,
total_recieved_bytes: u64,
total_transmitted_bytes: u64,
total_recieved_packets: u64,
total_transmitted_packets: u64,
errors_on_recieved: u64,
errors_on_transmitted: u64,
}
impl MetricsExportable for Ifaces {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
}
#[derive(Serialize, Debug)]
pub struct ProcessExtended {
name: String,
status: ProcessState,
pid: Pid,
start_time: String,
duration: String,
dependencies: processes::deps::Dependencies,
cpu_usage: f32,
ram_usage: u64,
virtual_mem_usage: u64,
disks_usage_read_bytes: u64,
disks_usage_write_bytes: u64,
}
impl ProcessExtended {
pub fn from_process_query_all(system: &mut System, proc: processes::ProcessesAll) -> Self {
system.refresh_processes(sysinfo::ProcessesToUpdate::All, true);
return if let Some(prc) = system.process(proc.pid.new_sysinfo_pid()) {
let disk_usage = prc.disk_usage();
let duration = chrono::Duration::new(prc.run_time() as i64, 0);
let start_time = chrono::DateTime::from_timestamp(prc.start_time() as i64, 0);
Self {
name: proc.name,
status: proc.state,
pid: proc.pid,
start_time : {
match start_time {
Some(date) => date.to_string(),
None => String::new()
}
},
duration: {
match duration {
Some(duration) => {
format!("{}:{}:{}:{}",
duration.num_days(),
duration.num_hours() % 24,
duration.num_minutes() % 60,
duration.num_seconds() % 60
)
},
None => String::new()
}
},
dependencies: proc.dependencies,
cpu_usage: prc.cpu_usage(),
ram_usage: prc.memory(),
virtual_mem_usage: prc.virtual_memory(),
disks_usage_read_bytes: disk_usage.read_bytes,
disks_usage_write_bytes: disk_usage.written_bytes,
}
} else {
Self {
name: proc.name,
status: proc.state,
pid: proc.pid,
start_time : String::new(),
duration: String::new(),
dependencies: proc.dependencies,
cpu_usage: 0.0,
ram_usage: 0,
virtual_mem_usage: 0,
disks_usage_read_bytes: 0,
disks_usage_write_bytes: 0,
}
};
}
}
impl MetricsExportable for MetricProcesses {
fn serialze_into_output(&self) -> anyhow::Result<String> {
Ok(serde_json::to_string_pretty(self)?)
}
} }
#[cfg(test)] #[cfg(test)]

View File

@ -1,24 +1,46 @@
use crate::options::structs::{Events, NegativeOutcomes, ProcessState, ProcessUnit};
use async_trait::async_trait;
use log::{error, warn}; use log::{error, warn};
use serde::Serialize;
use std::collections::HashSet;
use std::process::{Command, Output}; use std::process::{Command, Output};
use std::sync::Arc; use std::sync::Arc;
use tokio::time::Duration;
use crate::options::structs::{ProcessState, Events, NegativeOutcomes, ProcessUnit};
use std::collections::HashSet;
use tokio::sync::mpsc::Receiver as MpscReciever; use tokio::sync::mpsc::Receiver as MpscReciever;
use async_trait::async_trait; use tokio::time::Duration;
pub mod v2 { pub mod v2 {
use log::info;
use crate::options::structs::DependencyType; use crate::options::structs::DependencyType;
use crate::utils::metrics::processes::ProcessesGeneral;
use log::info;
use std::path::Path; use std::path::Path;
use tokio::time::sleep;
use super::*; use super::*;
#[derive(Debug, Serialize, Clone, Copy)]
pub struct Pid(u32);
impl std::fmt::Display for Pid {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
return write!(f, "{}", self.0);
}
}
impl Pid {
fn new() -> Self {
Pid(0)
}
#[allow(unused)]
pub fn new_sysinfo_pid(&self) -> sysinfo::Pid {
sysinfo::Pid::from_u32(self.0 as u32)
}
}
#[derive(Debug)] #[derive(Debug)]
pub struct ProcessesController { pub struct ProcessesController {
name: Arc<str>, pub name: Arc<str>,
pid: Pid,
bin: String, bin: String,
// obj: Arc<TrackingProcess>,
state: ProcessState, state: ProcessState,
event_reader: MpscReciever<Events>, event_reader: MpscReciever<Events>,
negative_events: HashSet<Arc<str>>, negative_events: HashSet<Arc<str>>,
@ -31,72 +53,259 @@ pub mod v2 {
} }
impl ProcessesController { impl ProcessesController {
#[inline(always)]
pub fn new(name: &str, event_reader: MpscReciever<Events>) -> ProcessesController { pub fn new(name: &str, event_reader: MpscReciever<Events>) -> ProcessesController {
ProcessesController { ProcessesController {
name : Arc::from(name), name: Arc::from(name),
bin : String::new(), pid: Pid::new(),
state : ProcessState::Stopped, bin: String::new(),
state: ProcessState::Stopped,
event_reader, event_reader,
negative_events : HashSet::new(), negative_events: HashSet::new(),
} }
} }
#[inline(always)]
pub fn with_exe(mut self, bin: impl AsRef<Path>) -> ProcessesController { pub fn with_exe(mut self, bin: impl AsRef<Path>) -> ProcessesController {
self.bin = bin.as_ref().to_string_lossy().into_owned(); self.bin = bin.as_ref().to_string_lossy().into_owned();
self self
} }
pub fn get_pid(&self) -> Pid {
self.pid
}
pub fn get_state(&self) -> ProcessState {
self.state
}
async fn trigger_on(&mut self, dep_name: &str, trigger: &str, dep_type: DependencyType) { async fn trigger_on(&mut self, dep_name: &str, trigger: &str, dep_type: DependencyType) {
match trigger { match trigger {
"stay" => { "stay" => {
info!("Event on {} `{}` for {}. Ignoring ...", dep_type, dep_name, self.name); info!(
}, "Event on {} `{}` for {}. Ignoring ...",
dep_type, dep_name, self.name
);
}
"stop" => { "stop" => {
if is_active(&self.name).await { if is_active(&self.name).await {
info!("Event on {} `{}` for {}. Stopping ...", dep_type, dep_name, self.name); info!(
terminate_process(&self.name).await; "Event on {} `{}` for {}. Stopping ...",
self.state = ProcessState::Stopped; dep_type, dep_name, self.name
);
match terminate_process(&self.name).await {
Ok(_) => {
info!("Process {} was stopped ...", &self.name);
self.state = ProcessState::Stopped;
self.pid = Pid::new();
}
Err(er) => {
error!("Cannot stop process {} : {}", self.name, er);
}
}
} }
}, }
"user-stop" => {
if is_active(&self.name).await {
info!(
"Event on {} `{}` for {}. Stopping ...",
dep_type, "User Stop Call", self.name
);
match terminate_process(&self.name).await {
Ok(_) => {
info!("Process {} was forcefully stopped ...", &self.name);
self.state = ProcessState::StoppedByCli;
self.pid = Pid::new();
}
Err(er) => {
error!("Cannot forcefully stop process {} : {}", self.name, er);
}
}
}
}
"user-hold" => {
if is_active(&self.name).await {
info!(
"Event on {} `{}` for {}. Stopping ...",
dep_type, "User Hold Call", self.name
);
match freeze_process(&self.name).await {
Ok(_) => {
info!("Process {} was forcefully frozen ...", &self.name);
self.state = ProcessState::HoldingByCli;
}
Err(er) => {
error!("Cannot forcefully freeze process {} : {}", self.name, er);
}
}
}
}
"hold" => { "hold" => {
if !is_frozen(&self.name).await { if !is_frozen(&self.name).await {
info!("Event on {} `{}` for {}. Freezing ...", dep_type, dep_name, self.name); info!(
freeze_process(&self.name).await; "Event on {} `{}` for {}. Freezing ...",
self.state = ProcessState::Holding; dep_type, dep_name, self.name
);
match freeze_process(&self.name).await {
Ok(_) => {
info!("Process {} was frozen ...", &self.name);
self.state = ProcessState::Holding;
}
Err(er) => {
error!("Cannot freeze process {} : {}", self.name, er);
}
}
} }
}, }
"restart" => { "restart" => {
info!("Event on {} `{}` for {}. Restarting ...", dep_type, dep_name, self.name); info!(
let _ = restart_process(&self.name, &self.bin).await; "Event on {} `{}` for {}. Restarting ...",
}, dep_type, dep_name, self.name
_ => error!("Impermissible trigger in file-trigger for {}. Ignoring event ...", self.name), );
let pid = restart_process(&self.name, &self.bin).await;
sleep(Duration::from_millis(100)).await;
if let Ok(pid) = pid {
self.pid = Pid(pid);
info!("{}: New PID - {}", self.name, self.pid);
}
}
_ => error!(
"Impermissible trigger in file-trigger for {}. Ignoring event ...",
self.name
),
} }
tokio::time::sleep(Duration::from_micros(100)).await; tokio::time::sleep(Duration::from_micros(100)).await;
} }
#[allow(unused)]
pub async fn stop_by_user_call(&mut self) -> anyhow::Result<()> {
terminate_process(&self.name).await?;
warn!("Process {} was stopped by user call ...", self.name);
self.state = ProcessState::StoppedByCli;
self.pid = Pid::new();
Ok(())
}
#[allow(unused)]
pub async fn freeze_by_user_call(&mut self) -> anyhow::Result<()> {
freeze_process(&self.name).await?;
warn!("Process {} was frozen by user call ...", self.name);
self.state = ProcessState::HoldingByCli;
Ok(())
}
#[allow(unused)]
pub async fn start_by_user_call(&mut self) -> anyhow::Result<()> {
if self.negative_events.is_empty() {
let pid = start_process(&self.name, &self.bin).await?;
warn!("Process {} was started by user call ...", self.name);
self.state = ProcessState::Pending;
self.pid = Pid(pid);
return Ok(());
} else {
warn!("Attempt to start process {} by user call was stopped due to existance of negative incidents ...", self.name);
return Err(anyhow::Error::msg(
format!("Attempt to start process {} by user call was stopped due to existance of negative incidents ...", self.name)
));
}
}
#[allow(unused)]
pub async fn unfreeze_by_user_call(&mut self) -> anyhow::Result<()> {
if self.negative_events.is_empty() {
unfreeze_process(&self.name).await?;
warn!("Process {} was unfrozen by user call ...", self.name);
self.state = ProcessState::Pending;
Ok(())
} else {
warn!("Attempt to unfreeze process {} by user call was stopped due to existance of negative incidents ...", self.name);
return Err(anyhow::Error::msg(
format!("Attempt to unfreeze process {} by user call was stopped due to existance of negative incidents ...", self.name)
));
}
}
#[allow(unused)]
pub async fn restart_by_user_call(&mut self) -> anyhow::Result<()> {
let pid = restart_process(&self.name, &self.bin).await?;
warn!("Process {} was restarted by user call ...", self.name);
self.pid = Pid(pid);
Ok(())
}
pub async fn get_general_info(&self) -> ProcessesGeneral {
ProcessesGeneral {
name: self.name.to_string(),
state: self.state,
pid: self.pid,
}
}
} }
#[async_trait] #[async_trait]
impl ProcessUnit for ProcessesController { impl ProcessUnit for ProcessesController {
async fn process(&mut self) { async fn process(&mut self) {
if self.negative_events.len() == 0 { if self.negative_events.len() == 0 {
match self.state { let conditions = (is_active(&self.name).await, is_frozen(&self.name).await);
ProcessState::Holding => { let state = &self.state;
info!("No negative dependecies events on {} process. Unfreezing ...", self.name); match (state, conditions) {
(ProcessState::Holding, (_, _)) => {
info!(
"No negative dependecies events on {} frozen process. Unfreezing ...",
self.name
);
if let Err(er) = unfreeze_process(&self.name).await {
if er.to_string().contains("already") {
self.state = ProcessState::Pending;
} else {
error!("Cannot unfreeze process {} : {}", self.name, er);
}
} else {
self.state = ProcessState::Pending;
info!("Process {} was unfreezed", &self.name);
}
}
(ProcessState::Stopped, (_, _)) => {
info!(
"No negative dependecies events on stopped {} process. Starting ...",
self.name
);
match start_process(&self.name, &self.bin).await {
Ok(pid) => {
self.state = ProcessState::Pending;
self.pid = Pid(pid);
info!("{}: New PID - {}", self.name, self.pid);
}
Err(er) => {
if er.to_string().contains("already") {
self.state = ProcessState::Pending;
} else {
error!("Cannot start process {} : {}", self.name, er);
}
}
}
}
(ProcessState::Pending, (false, false)) => {
info!(
"{} process was impermissibly stopped. Starting ...",
self.name
);
match start_process(&self.name, &self.bin).await {
Ok(pid) => {
self.state = ProcessState::Pending;
self.pid = Pid(pid);
info!("{}: New PID - {}", self.name, self.pid);
}
Err(er) => {
error!("Cannot start process {} : {}", self.name, er);
}
}
}
(ProcessState::Pending, (true, true)) => {
info!(
"No negative dependecies events on {} process. Unfreezing ...",
self.name
);
if let Err(er) = unfreeze_process(&self.name).await { if let Err(er) = unfreeze_process(&self.name).await {
error!("Cannot unfreeze process {} : {}", self.name, er); error!("Cannot unfreeze process {} : {}", self.name, er);
} else { } else {
self.state = ProcessState::Pending; self.state = ProcessState::Pending;
info!("Process {} was unfreezed", &self.name);
} }
}, }
ProcessState::Stopped => { _ => {}
info!("No negative dependecies events on {} process. Starting ...", self.name); }
if let Err(er) = start_process(&self.name, &self.bin).await {
error!("Cannot start process {} : {}", self.name, er);
} else {
self.state = ProcessState::Pending;
}
},
_ => {},
}
} }
while let Ok(event) = self.event_reader.try_recv() { while let Ok(event) = self.event_reader.try_recv() {
match event { match event {
@ -104,22 +313,16 @@ pub mod v2 {
if self.negative_events.contains(&target) { if self.negative_events.contains(&target) {
self.negative_events.remove(&target); self.negative_events.remove(&target);
} }
}, }
Events::Negative(event) => { Events::Negative(event) => match event {
match event { NegativeOutcomes::FileWasChanged(target, dep_type, trigger)
NegativeOutcomes::FileWasChanged(target, dep_type, trigger) | | NegativeOutcomes::FileWasMovedOrDeleted(target, dep_type, trigger)
NegativeOutcomes::FileWasMovedOrDeleted(target, dep_type, trigger) | | NegativeOutcomes::ServiceIsUnreachable(target, dep_type, trigger) => {
NegativeOutcomes::ServiceIsUnreachable(target, dep_type, trigger) => { if !self.negative_events.contains(&target) {
if !self.negative_events.contains(&target) { self.negative_events.insert(target.clone());
self.negative_events.insert(target.clone());
self.trigger_on(&target, &trigger, dep_type).await;
self.trigger_on( }
&target,
&trigger,
dep_type
).await;
}
},
} }
}, },
} }
@ -130,7 +333,7 @@ pub mod v2 {
/// # Fn `get_pid` /// # Fn `get_pid`
/// ## for initializing process of unstoppable grubbing metrics. /// ## for initializing process of unstoppable grubbing metrics.
/// ///
/// *input* : `&str` /// *input* : `&str`
/// ///
/// *output* : `None` if cant get process PID | `Some(Output)` on success /// *output* : `None` if cant get process PID | `Some(Output)` on success
@ -139,8 +342,8 @@ pub mod v2 {
/// ///
/// *managing* : process name /// *managing* : process name
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub async fn get_pid(name: &str) -> Option<Output> { pub async fn get_pid(name: &str) -> Option<Output> {
let name = Arc::new(name.to_string()); let name = Arc::new(name.to_string());
let res = let res =
@ -159,7 +362,7 @@ pub async fn get_pid(name: &str) -> Option<Output> {
/// # Fn `is_active` /// # Fn `is_active`
/// ## for checking process's activity state /// ## for checking process's activity state
/// ///
/// *input* : `&str` /// *input* : `&str`
/// ///
/// *output* : `true` if process running | `false` if not /// *output* : `true` if process running | `false` if not
@ -168,8 +371,8 @@ pub async fn get_pid(name: &str) -> Option<Output> {
/// ///
/// *managing* : process name /// *managing* : process name
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub async fn is_active(name: &str) -> bool { pub async fn is_active(name: &str) -> bool {
let arc_name = Arc::new(name.to_string()); let arc_name = Arc::new(name.to_string());
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {
@ -188,7 +391,7 @@ pub async fn is_active(name: &str) -> bool {
/// # Fn `is_frozen` /// # Fn `is_frozen`
/// ## for checking process's hibernation state /// ## for checking process's hibernation state
/// ///
/// *input* : `&str` /// *input* : `&str`
/// ///
/// *output* : `true` if process is frozen | `false` if not /// *output* : `true` if process is frozen | `false` if not
@ -198,7 +401,7 @@ pub async fn is_active(name: &str) -> bool {
/// *managing* : process name /// *managing* : process name
/// ///
/// *depends on* : fn `get_pid` /// *depends on* : fn `get_pid`
/// ///
pub async fn is_frozen(name: &str) -> bool { pub async fn is_frozen(name: &str) -> bool {
let temp: Output; let temp: Output;
if let Some(output) = get_pid(name).await { if let Some(output) = get_pid(name).await {
@ -229,7 +432,7 @@ pub async fn is_frozen(name: &str) -> bool {
/// # Fn `terminate_process` /// # Fn `terminate_process`
/// ## for stop current process /// ## for stop current process
/// ///
/// *input* : `&str` /// *input* : `&str`
/// ///
/// *output* : () /// *output* : ()
@ -239,20 +442,21 @@ pub async fn is_frozen(name: &str) -> bool {
/// *managing* : process name /// *managing* : process name
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub async fn terminate_process(name: &str) { pub async fn terminate_process(name: &str) -> anyhow::Result<()> {
let _ = Command::new("pkill") if !is_active(name).await {
.arg(name) return Err(anyhow::Error::msg(format!(
.output() "Process {} is already stopped",
.unwrap_or_else(|_| { name
error!("Failed to execute command 'pkill'"); )));
std::process::exit(101); }
}); let _ = Command::new("pkill").arg(name).output()?;
Ok(())
} }
/// # Fn `terminate_process` /// # Fn `terminate_process`
/// ## for freeze/hibernate current process /// ## for freeze/hibernate current process
/// ///
/// *input* : `&str` /// *input* : `&str`
/// ///
/// *output* : () /// *output* : ()
@ -262,20 +466,15 @@ pub async fn terminate_process(name: &str) {
/// *managing* : process name /// *managing* : process name
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub async fn freeze_process(name: &str) { pub async fn freeze_process(name: &str) -> anyhow::Result<()> {
let _ = Command::new("pkill") let _ = Command::new("pkill").args(["-STOP", name]).output()?;
.args(["-STOP", name]) Ok(())
.output()
.unwrap_or_else(|_| {
error!("Failed to freeze process");
std::process::exit(101);
});
} }
/// # Fn `unfreeze_process` /// # Fn `unfreeze_process`
/// ## for unfreeze/hibernate current process /// ## for unfreeze/hibernate current process
/// ///
/// *input* : `&str` /// *input* : `&str`
/// ///
/// *output* : () /// *output* : ()
@ -285,17 +484,15 @@ pub async fn freeze_process(name: &str) {
/// *managing* : process name /// *managing* : process name
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub async fn unfreeze_process(name: &str) -> anyhow::Result<()> { pub async fn unfreeze_process(name: &str) -> anyhow::Result<()> {
let _ = Command::new("pkill") let _ = Command::new("pkill").args(["-CONT", name]).output()?;
.args(["-CONT", name])
.output()?;
Ok(()) Ok(())
} }
/// # Fn `restart_process` /// # Fn `restart_process`
/// ## for restarting current process /// ## for restarting current process
/// ///
/// *input* : `&str`, &str /// *input* : `&str`, &str
/// ///
/// *output* : () /// *output* : ()
@ -305,16 +502,16 @@ pub async fn unfreeze_process(name: &str) -> anyhow::Result<()> {
/// *managing* : process name and path to its exec file /// *managing* : process name and path to its exec file
/// ///
/// *depends on* : fn `start_process`, fn `terminate_process` /// *depends on* : fn `start_process`, fn `terminate_process`
/// ///
pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<()> { pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<u32> {
terminate_process(name).await; terminate_process(name).await?;
tokio::time::sleep(Duration::from_millis(100)).await; tokio::time::sleep(Duration::from_millis(100)).await;
start_process(name, path).await start_process(name, path).await
} }
/// # Fn `start_process` /// # Fn `start_process`
/// ## for starting current process /// ## for starting current process
/// ///
/// *input* : `&str`, &str /// *input* : `&str`, &str
/// ///
/// *output* : () /// *output* : ()
@ -324,20 +521,27 @@ pub async fn restart_process(name: &str, path: &str) -> anyhow::Result<()> {
/// *managing* : process name and path to its exec file /// *managing* : process name and path to its exec file
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
pub async fn start_process(name: &str, path: &str) -> anyhow::Result<()> { pub async fn start_process(name: &str, path: &str) -> anyhow::Result<u32> {
// let runsh = format!("{} {}", "exec", path); if is_active(name).await {
return Err(anyhow::Error::msg(format!(
"Process {} is already running",
name
)));
}
let mut command = Command::new(path); let mut command = Command::new(path);
// command.arg(path); // command.arg(path);
match command.spawn() { match command.spawn() {
Ok(_) => { Ok(child) => {
let pid = child.id();
warn!("Process {} is running now!", name); warn!("Process {} is running now!", name);
Ok(()) Ok(pid)
}
Err(er) => {
Err(anyhow::Error::msg(format!("Cannot start process {} due to {}", name, er)))
} }
Err(er) => Err(anyhow::Error::msg(format!(
"Cannot start process {} : {}",
name, er
))),
} }
} }
@ -356,8 +560,7 @@ mod process_unittests {
// let _ = std::io::stdout().write_all(b""); // let _ = std::io::stdout().write_all(b"");
let res1 = start_process("restart-prc", "./tests/examples/restart-prc").await; let res1 = start_process("restart-prc", "./tests/examples/restart-prc").await;
assert!(res1.is_ok()); assert!(res1.is_ok());
let res2 = let res2 = restart_process("restart-prc", "./tests/examples/restart-prc").await;
restart_process("restart-prc", "./tests/examples/restart-prc").await;
assert!(res2.is_ok()); assert!(res2.is_ok());
let _ = terminate_process("restart-prc").await; let _ = terminate_process("restart-prc").await;
let res3 = is_active("restart-prc").await; let res3 = is_active("restart-prc").await;
@ -384,6 +587,7 @@ mod process_unittests {
let res1 = start_process("freeze-check", "./tests/examples/freeze-check").await; let res1 = start_process("freeze-check", "./tests/examples/freeze-check").await;
assert!(res1.is_ok()); assert!(res1.is_ok());
assert!(!is_frozen("freeze-check").await); assert!(!is_frozen("freeze-check").await);
let _ = terminate_process("freeze-check").await;
} }
#[tokio::test] #[tokio::test]
async fn pidof_active_process() { async fn pidof_active_process() {

View File

@ -1,18 +1,20 @@
use crate::options::structs::CustomError;
use log::{error, warn};
use std::net::{TcpStream, ToSocketAddrs};
use std::sync::Arc;
use tokio::time::Duration;
use tokio::sync::mpsc::Sender as Sender;
use async_trait::async_trait; use async_trait::async_trait;
use futures::future::Future;
use log::{error, warn};
use std::net::ToSocketAddrs;
use std::pin::Pin;
use std::sync::Arc;
use tokio::sync::mpsc::Sender;
use tokio::time::Duration;
pub mod v2 { pub mod v2 {
use futures::FutureExt;
use log::info; use log::info;
use crate::options::structs::{Triggers, ProcessUnit, Events, ServiceState}; use crate::options::structs::{Events, ProcessUnit, ServiceState, Triggers};
use super::*; use super::*;
use std::collections::{HashMap, BTreeMap, VecDeque}; use std::collections::{BTreeMap, HashMap, VecDeque};
type MpscSender = Arc<Sender<Events>>; type MpscSender = Arc<Sender<Events>>;
// type EventHandlers<'a> = Vec<MpscSender<Events<'a>>>; // type EventHandlers<'a> = Vec<MpscSender<Events<'a>>>;
@ -24,45 +26,43 @@ pub mod v2 {
pub struct ServicesController { pub struct ServicesController {
// i.e. yandex.ru // i.e. yandex.ru
#[allow(unused)] #[allow(unused)]
name : String, name: String,
// i.e. yandex.ru:443 // i.e. yandex.ru:443
access_url : Arc<str>, access_url: Arc<str>,
// "OK" or "Unavailable" // "OK" or "Unavailable"
state: ServiceState, state: ServiceState,
// btree map with key as max wait time and it's key to hashmap // btree map with key as max wait time and it's key to hashmap
config: ConnectionQueue, config: ConnectionQueue,
// Map of processes with their (trigger and mpsc sender) // Map of processes with their (trigger and mpsc sender)
event_registrator : EventHandlers, event_registrator: EventHandlers,
} }
impl PartialEq for ServicesController { impl PartialEq for ServicesController {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.access_url == other.access_url self.access_url == other.access_url
} }
} }
impl ServicesController { impl ServicesController {
#[inline(always)]
pub fn new() -> ServicesController { pub fn new() -> ServicesController {
ServicesController { ServicesController {
name : String::new(), name: String::new(),
access_url : Arc::from(String::new()), access_url: Arc::from(String::new()),
state : ServiceState::Unavailable, state: ServiceState::Ok,
config: ConnectionQueue::new(), config: ConnectionQueue::new(),
event_registrator : EventHandlers::new(), event_registrator: EventHandlers::new(),
} }
} }
pub fn with_access_name( #[inline(always)]
mut self, pub fn with_access_name(mut self, hostname: &str, access_url: &str) -> ServicesController {
hostname: &str,
access_url: &str,
) -> ServicesController {
self.name = hostname.to_string(); self.name = hostname.to_string();
self.access_url = Arc::from(access_url); self.access_url = Arc::from(access_url);
self self
} }
#[inline(always)]
pub fn with_params( pub fn with_params(
mut self, mut self,
conn_queue: ConnectionQueue, conn_queue: ConnectionQueue,
event_reg: EventHandlers, event_reg: EventHandlers,
) -> ServicesController { ) -> ServicesController {
@ -72,99 +72,185 @@ pub mod v2 {
} }
pub fn get_access_url(hostname: &str, port: Option<&u32>) -> String { pub fn get_access_url(hostname: &str, port: Option<&u32>) -> String {
format!("{}{}", hostname, port.map_or_else(|| "".to_string(), |p| format!(":{}", p))) format!(
"{}{}",
hostname,
port.map_or_else(|| "".to_string(), |p| format!(":{}", p))
)
} }
pub fn add_process( pub fn get_state(&self) -> ServiceState {
&mut self, self.state
proc_name: &str, }
trigger: Triggers, pub fn add_process(&mut self, proc_name: &str, trigger: Triggers, sender: MpscSender) {
sender: MpscSender,
) {
let proc_name: Arc<str> = Arc::from(proc_name); let proc_name: Arc<str> = Arc::from(proc_name);
// queue add // queue add
if let Triggers::Service { wait, .. } = trigger { if let Triggers::Service { wait, .. } = trigger {
self.config.entry(wait) self.config
.and_modify(|el| el.push_back(proc_name.clone())) .entry(wait)
.or_insert({ .and_modify(|el| el.push_back(proc_name.clone()))
let mut temp = VecDeque::new(); .or_insert({
temp.push_back(proc_name.clone()); let mut temp = VecDeque::new();
temp temp.push_back(proc_name.clone());
}); temp
});
} }
// event add // event add
self.event_registrator.entry(proc_name).or_insert((trigger, sender)); self.event_registrator
.entry(proc_name)
.or_insert((trigger, sender));
} }
async fn check_state(&self) -> anyhow::Result<()> { async fn check_state(&self) -> anyhow::Result<()> {
let mut addrs = self.access_url.to_socket_addrs()?; let url = self.access_url.clone();
if !addrs.any(|a| TcpStream::connect_timeout(&a, Duration::new(1, 0)).is_ok()) { let resolve_future = tokio::task::spawn_blocking(move || url.to_socket_addrs());
return Err(anyhow::Error::msg(format!("No access to service `{}`", &self.access_url))) let addrs: Vec<_> =
match tokio::time::timeout(Duration::from_secs(1), resolve_future).await {
Ok(Ok(addrs)) => addrs?.collect(),
Ok(Err(er)) => return Err(er.into()),
Err(_) => return Err(anyhow::Error::msg("DNS resolution timeout")),
};
if addrs.is_empty() {
return Err(anyhow::Error::msg("No addresses resolved"));
} }
let tasks: Vec<_> = addrs
.into_iter()
.map(|addr| async move {
match tokio::time::timeout(
Duration::from_secs(2),
tokio::net::TcpStream::connect(&addr),
)
.await
{
Ok(Ok(_)) => Some(addr),
_ => None,
}
})
.collect();
let mut any_success = false;
for task in futures::future::join_all(tasks).await {
if task.is_some() {
any_success = true;
break;
}
}
if !any_success {
return Err(anyhow::Error::msg(format!(
"No access to service `{}`",
&self.access_url
)));
}
Ok(()) Ok(())
} }
async fn trigger_on(&mut self) { async fn trigger_on(&mut self) {
match self.state { match self.state {
ServiceState::Ok => { ServiceState::Ok => {
let _ = self.event_registrator let futures: Vec<Pin<Box<dyn Future<Output = ()> + Send>>> = self
.iter() .event_registrator
.map(|(_, (_, el))| async { .iter()
let _ = el.send(Events::Positive(self.access_url.clone())).await; .map(|(prc, (_, sender_opt))| (prc, (self.access_url.clone(), sender_opt)))
}); .map(|(prc, (serv, sender_opt))| async move {
}, info!("Notifying process {} ...", prc);
let _ = sender_opt.send(Events::Positive(serv.clone())).await;
})
.map(|fut| fut.boxed())
.collect();
futures::future::join_all(futures).await;
}
ServiceState::Unavailable => { ServiceState::Unavailable => {
// looped check and notifying // looped check and notifying
self.looped_check().await; self.looped_check().await;
}, }
} }
} }
async fn looped_check(self: &mut Self) { async fn looped_check(self: &mut Self) {
let longest = self.config.last_entry().unwrap(); let longest = self.config.last_entry().unwrap();
let longest = longest.key(); let longest = longest.key();
let mut interapter = tokio::time::interval(tokio::time::Duration::from_secs(1)); let mut interapter = tokio::time::interval(tokio::time::Duration::from_secs(1));
let timer = tokio::time::Instant::now(); let timer = tokio::time::Instant::now();
let mut attempt: u32 = 1; let mut attempt: u32 = 1;
let access_url = Arc::new(self.access_url.clone()); let access_url = Arc::new(self.access_url.clone());
// let event_registrator = &mut self.event_registrator;
if let Err(_) = tokio::time::timeout(tokio::time::Duration::from_secs((longest + 1) as u64), async { if let Err(_) = tokio::time::timeout(
// let access_url = access_url.clone(); tokio::time::Duration::from_secs((longest + 1) as u64),
loop { async {
interapter.tick().await; // let access_url = access_url.clone();
info!("Trying to connect to {} (attempt: {}) ...", &access_url, attempt); loop {
attempt += 1; interapter.tick().await;
info!(
"Trying to connect to {} (attempt: {}) ...",
&access_url, attempt
);
attempt += 1;
let state_check_result = self.check_state().await; let state_check_result = self.check_state().await;
if state_check_result.is_ok() { if state_check_result.is_ok() {
info!("Connection to {} is `OK` now", &access_url); info!("Connection to {} is `OK` now", &access_url);
self.state = ServiceState::Ok; self.state = ServiceState::Ok;
break; let futures: Vec<Pin<Box<dyn Future<Output = ()> + Send>>> = self
} else { .event_registrator
let now = timer.elapsed(); .iter()
let iterator = self.config.iter() .map(|(prc, (_, sender_opt))| {
.filter(|(&a, _)| tokio::time::Duration::from_secs(a as u64) <= now) (prc, (self.access_url.clone(), sender_opt))
.flat_map(|(_, a)| a.iter().cloned()) })
.collect::<VecDeque<Arc<str>>>(); .map(|(prc, (serv, sender_opt))| async move {
info!("Notifying process {} ...", prc);
let _ = sender_opt.send(Events::Positive(serv.clone()));
})
.map(|fut| fut.boxed())
.collect();
for name in iterator { futures::future::join_all(futures).await;
let proc_name = name.to_string(); break;
info!("Trying to notify process `{}` ...", &proc_name); } else {
let sender_opt = self.event_registrator.get(&name) let now = timer.elapsed();
.map(|(trigger, sender)|
(trigger.to_service_negative_event(name.clone()), sender)
);
if let Some((tr, tx)) = sender_opt { let iterator = self
let _ = tx.send(tr.unwrap()).await; .config
} else { .iter()
error!("Cannot find {} channel sender in {} service", name.clone(), &self.access_url) .filter(|(&wait, _)| {
tokio::time::Duration::from_secs(wait as u64) <= now
})
.flat_map(|(_, a)| a.iter().cloned())
.collect::<VecDeque<Arc<str>>>();
for name in iterator {
let proc_name = name.to_string();
info!("Trying to notify process `{}` ...", &proc_name);
let sender_opt =
self.event_registrator.get(&name).map(|(trigger, sender)| {
(
trigger
.to_service_negative_event(self.access_url.clone()),
sender,
)
});
if let Some((tr, tx)) = sender_opt {
let _ = tx.send(tr.unwrap()).await;
} else {
error!(
"Cannot find {} channel sender in {} service",
name.clone(),
&self.access_url
)
}
} }
} }
} }
} },
}).await { )
.await
{
info!("Timeout of establishing connection to {}. ", &access_url); info!("Timeout of establishing connection to {}. ", &access_url);
} }
} }
pub fn get_arc_access_url(&self) -> Arc<str> {
self.access_url.clone()
}
} }
#[async_trait] #[async_trait]
impl ProcessUnit for ServicesController { impl ProcessUnit for ServicesController {
@ -176,181 +262,30 @@ pub mod v2 {
warn!("Connection with `{}` service was established. Notifying {} process(es) ...", &self.access_url, &self.event_registrator.len()); warn!("Connection with `{}` service was established. Notifying {} process(es) ...", &self.access_url, &self.event_registrator.len());
self.state = ServiceState::Ok; self.state = ServiceState::Ok;
self.trigger_on().await; self.trigger_on().await;
}, }
(ServiceState::Ok, Err(_)) => { (ServiceState::Ok, Err(_)) => {
warn!("Unreachable for connection service `{}`. Notifying {} process(es) ...", &self.access_url, &self.event_registrator.len()); warn!("Unreachable for connection service `{}`. Initializing reconnect mechanism ...", &self.access_url);
self.state = ServiceState::Unavailable; self.state = ServiceState::Unavailable;
self.trigger_on().await; self.trigger_on().await;
}, }
(ServiceState::Unavailable, Err(_)) => warn!("Service {} is still unreachable", &self.access_url), (ServiceState::Unavailable, Err(_)) => {
_ => { /* DEAD END WITH NO INTEREST */ }, warn!("Service {} is still unreachable", &self.access_url)
}
_ => { /* DEAD END WITH NO INTEREST */ }
} }
} }
} }
} }
/// # Fn `service_handler`
/// ## function to realize mechanism of current process' dep services monitoring
///
/// *input* : `&str`, `&Vec<Services>`, `Arc<mpsc::Sender<u8>>`
///
/// *output* : ()
///
/// *initiator* : fn `utils::running_handler`
///
/// *managing* : process name, ref of vec of dep services, ref counter to managing channel writer
///
/// *depends on* : fn `check_service`, fn `utils::prcs::is_active`, fn `utils::prcs::is_frozen`, fn `looped_service_connecting`
///
// pub async fn service_handler(
// name: &str,
// services: &Vec<Services>,
// tx: Arc<mpsc::Sender<u8>>,
// ) -> Result<(), CustomError> {
// // println!("service daemon on {}", name);
// for serv in services {
// if check_service(&serv.hostname, &serv.port).await.is_err() {
// if !is_active(name).await || is_frozen(name).await {
// return Err(CustomError::Fatal);
// }
// error!(
// "Service {}:{} is unreachable for process {}",
// &serv.hostname, &serv.port, &name
// );
// match serv.triggers.on_lost.as_str() {
// "stay" => {
// tx.send(4).await.unwrap();
// continue;
// }
// "stop" => {
// if looped_service_connecting(name, serv).await.is_err() {
// tx.send(5).await.unwrap();
// tokio::task::yield_now().await;
// return Err(CustomError::Fatal);
// }
// }
// "hold" => {
// // if is_frozen(name).await {
// // return Err(CustomError::Fatal);
// // }
// if looped_service_connecting(name, serv).await.is_err() {
// tx.send(6).await.unwrap();
// tokio::task::yield_now().await;
// return Err(CustomError::Fatal);
// }
// }
// _ => {
// tx.send(101).await.unwrap();
// return Err(CustomError::Fatal);
// }
// }
// }
// }
// tokio::time::sleep(Duration::from_millis(100)).await;
// Ok(())
// }
/// # Fn `looped_service_connecting`
/// ## for service's state check in loop (with delay and restriction of attempts)
///
/// *input* : `&str`, `&Services`
///
/// *output* : Ok(()) if service now available | Err(er) if still not
///
/// *initiator* : fn `service_handler`
///
/// *managing* : process name, current service struct
///
/// *depends on* : fn `check_service`
///
// async fn looped_service_connecting(name: &str, serv: &Services) -> Result<(), CustomError> {
// if serv.triggers.wait == 0 {
// loop {
// tokio::time::sleep(Duration::from_secs(serv.triggers.delay.into())).await;
// warn!(
// "Attempting to connect from {} process to {}:{}",
// &name, &serv.hostname, &serv.port
// );
// match check_service(&serv.hostname, &serv.port).await {
// Ok(_) => {
// log::info!(
// "Successfully connected to {} from {} process!",
// &serv.hostname,
// &name
// );
// break;
// }
// Err(_) => {
// tokio::task::yield_now().await;
// }
// }
// }
// Ok(())
// } else {
// let start = Instant::now();
// while start.elapsed().as_secs() < serv.triggers.wait.into() {
// tokio::time::sleep(Duration::from_secs(serv.triggers.delay.into())).await;
// warn!(
// "Attempting to connect from {} process to {}:{}",
// &name, &serv.hostname, &serv.port
// );
// match check_service(&serv.hostname, &serv.port).await {
// Ok(_) => {
// log::info!(
// "Successfully connected to {} from {} process!",
// &serv.hostname,
// &name
// );
// return Ok(());
// }
// Err(_) => {
// tokio::task::yield_now().await;
// }
// }
// }
// Err(CustomError::Fatal)
// }
// }
/// # Fn `check_service`
/// ## for check current service's availiability
///
/// *input* : `&str`, `&u32`
///
/// *output* : Ok(()) if service now available | Err(er) if still not
///
/// *initiator* : fn `service_handler`, fn `looped_service_connecting`
///
/// *managing* : hostname, port
///
/// *depends on* : -
///
// ! have to be rewritten
// todo: rewrite use
async fn check_service(hostname: &str, port: &u32) -> Result<(), CustomError> {
let addr = format!("{}:{}", hostname, port);
match addr.to_socket_addrs() {
Ok(mut addrs) => {
if addrs.any(|a| TcpStream::connect_timeout(&a, Duration::new(1, 0)).is_ok()) {
Ok(())
} else {
Err(CustomError::Fatal)
}
}
Err(_) => Err(CustomError::Fatal),
}
}
#[cfg(test)] #[cfg(test)]
mod service_unittests { mod service_unittests {
use super::check_service; // use super::check_service;
#[tokio::test] // #[tokio::test]
async fn check_available_service() { // async fn check_available_service() {
assert!(check_service("ya.ru", &443).await.is_ok()); // assert!(check_service("ya.ru", &443).await.is_ok());
} // }
#[tokio::test] // #[tokio::test]
async fn check_unavailable_service() { // async fn check_unavailable_service() {
assert!(check_service("unavailable.service", &1111).await.is_err()); // assert!(check_service("unavailable.service", &1111).await.is_err());
} // }
} }

View File

@ -1,5 +1,7 @@
{ {
"dateOfCreation": "1", "dateOfCreation": "1",
"configServer": "", "processes": []
}
,
"processes": [] "processes": []
} }

View File

@ -1,5 +1,7 @@
{ {
"dateOfCreation": "1", "dateOfCreation": "1",
"configServer": "", "processes": []
}
,
"processes": [] "processes": []
} }

View File

@ -1,6 +1,5 @@
{ {
"dateOfCreation": "1721381809103", "dateOfCreation": "1721381809103",
"configServer" : "localhost",
"processes": [ "processes": [
{ {
"name": "temp-process", "name": "temp-process",
@ -12,7 +11,8 @@
"src": "/home/vladislav/web/runner-rs/examples/", "src": "/home/vladislav/web/runner-rs/examples/",
"triggers": { "triggers": {
"onDelete": "hold", "onDelete": "hold",
"onChange": "stop" "onChange": "stop",
"doRestore" : true
} }
} }
], ],