Compare commits

..

No commits in common. "master" and "v0.9.25" have entirely different histories.

45 changed files with 1501 additions and 1548 deletions

5
.gitignore vendored
View File

@ -1,6 +1,5 @@
/target /target
.idea .idea
/.env Dockerfile
Cargo.lock Cargo.lock
hagent_test.sock settings.json
release

1089
Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,12 +1,20 @@
[workspace] [package]
resolver = "2" name = "runner-rs"
members = [ version = "0.9.25"
"noxis-rs", edition = "2021"
"noxis-cli",
]
[profile.dev] [profile.dev]
debug = true debug = true
[profile.test] [dependencies]
debug = false anyhow = "1.0.93"
chrono = "0.4.38"
env_logger = "0.11.3"
inotify = "0.10.2"
log = "0.4.22"
pcap = "2.2.0"
redis = "0.25.4"
serde = { version = "1.0.203", features = ["derive"] }
serde_json = "1.0.118"
sysinfo = "0.32.0"
tokio = { version = "1.38.0", features = ["full", "time"] }

View File

@ -1,31 +1,25 @@
FROM ubuntu:22.04 FROM ubuntu
USER root RUN mkdir -p /usr/src/kii/
RUN apt update && apt install -y \
curl \
build-essential \
libssl-dev \
pkg-config \
libudev-dev \
procps \
gcc-riscv64-unknown-elf \
gcc-riscv64-linux-gnu \
binutils-riscv64-linux-gnu \
&& rm -rf /var/lib/apt/lists/*
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
WORKDIR /usr/src/kii/ WORKDIR /usr/src/kii/
COPY . ./ RUN mkdir monitor/
RUN mkdir -p services/temp-process/
RUN touch services/temp-process/dep.txt
RUN touch services/temp-process/run.sh
RUN echo "./services/temp-process/temp-process &>/dev/null" >> services/temp-process/run.sh
RUN chmod +x noxis-rs/temp-process COPY target/x86_64-unknown-linux-gnu/release/runner-rs monitor/
COPY settings.json .
COPY temp-process services/temp-process/
RUN rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu RUN chmod +x services/temp-process/temp-process
RUN chmod +x services/temp-process/run.sh
RUN chmod +x monitor/runner-rs
RUN cargo unibuild # some troubles with execution this row-cmd
# ?: cannot get while initializing container
RUN export ENODE_CID=$(cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' "{print \$6}")
ENTRYPOINT ["cargo", "test"] ENTRYPOINT [ "/usr/src/kii/monitor/runner-rs" ]

243
Jenkinsfile vendored
View File

@ -1,243 +0,0 @@
// CI pipeline: builds and tests in Docker for feature/rc branches, packages
// .deb artifacts on rc branches, ships them to remote hosts, and auto-merges
// rc pull requests on success.
pipeline {
    agent any
    stages {
        stage('Tests and compiling binaries') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('feature/') || env.CHANGE_BRANCH?.startsWith('rc') }
            }
            steps {
                script {
                    echo "Building and running tests in Docker for feature branch..."
                    try {
                        def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
                        def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
                        sh "mkdir -p ${targetDirAmd}"
                        sh "mkdir -p ${targetDirRisc}"
                        sh """
                        docker build --network=host -t e-monitor .
                        docker run --name e-monitor --dns 8.8.8.8 --network=host e-monitor:latest
                        """
                        sh "cp noxis-rs/settings.json ${targetDirAmd}"
                        sh "cp noxis-rs/settings.json ${targetDirRisc}"
                        sh "docker cp e-monitor:/usr/src/kii/target/x86_64-unknown-linux-gnu/release/noxis-cli ${targetDirAmd}"
                        sh "docker cp e-monitor:/usr/src/kii/target/x86_64-unknown-linux-gnu/release/noxis-rs ${targetDirAmd}"
                        sh "docker cp e-monitor:/usr/src/kii/target/riscv64gc-unknown-linux-gnu/release/noxis-cli ${targetDirRisc}"
                        sh "docker cp e-monitor:/usr/src/kii/target/riscv64gc-unknown-linux-gnu/release/noxis-rs ${targetDirRisc}"
                        echo "Tests passed successfully and binaries were extracted!"
                    } catch (Exception e) {
                        echo "Tests failed during Docker run."
                        error "Build failed at 'CI for feature' stage."
                    }
                }
            }
        }
        stage('Calculate Install Size') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('rc') }
            }
            steps {
                script {
                    echo "Calculating installation size for rc branch..."
                    def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
                    def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
                    // Sizes in 1 KiB blocks, consumed by the Installed-Size control field below.
                    def installSizeAmd = sh(script: "du -s --block-size=1024 ${targetDirAmd} | awk '{print \$1}'", returnStdout: true).trim()
                    def installSizeRisc = sh(script: "du -s --block-size=1024 ${targetDirRisc} | awk '{print \$1}'", returnStdout: true).trim()
                    env.INSTALL_SIZE_AMD = installSizeAmd
                    env.INSTALL_SIZE_RISC = installSizeRisc
                    echo "Installation size for amd64: ${env.INSTALL_SIZE_AMD} kB"
                    echo "Installation size for riscv64: ${env.INSTALL_SIZE_RISC} kB"
                }
            }
        }
        stage('Create Deb Packages') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('rc') }
            }
            steps {
                script {
                    echo "Creating deb packages for rc branch..."
                    def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
                    def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
                    def packageName = "noxis"
                    def version = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
                    // Builds one .deb: lays out the package tree, writes control/postinst
                    // and a systemd unit, then dpkg-deb packs it under ${targetDir}/rc/${arch}/.
                    def createDebPackage = { arch, binDir, targetDir, installSize ->
                        echo "Creating deb package for ${arch}..."
                        sh """
mkdir -p ${targetDir}/package/DEBIAN
mkdir -p ${targetDir}/package/usr/local/enode/${packageName}
mkdir -p ${targetDir}/package/usr/bin
mkdir -p ${targetDir}/package/etc/enode
mkdir -p ${targetDir}/package/lib/systemd/system
cp ${binDir}/noxis-cli ${targetDir}/package/usr/local/enode/${packageName}/
cp ${binDir}/noxis-rs ${targetDir}/package/usr/local/enode/${packageName}/
cp ${binDir}/settings.json ${targetDir}/package/etc/enode/
cat > ${targetDir}/package/DEBIAN/control <<EOF
Package: ${packageName}
Version: ${version}
Section: unknown
Priority: optional
Architecture: ${arch}
Maintainer: kis <supervisor@rosatom.ru>
Description: Noxis Agent Linux
Installed-Size: ${installSize}
EOF
chmod +x ${targetDir}/package/usr/local/enode/${packageName}/noxis-cli
chmod +x ${targetDir}/package/usr/local/enode/${packageName}/noxis-rs
cat > ${targetDir}/package/DEBIAN/postinst <<EOF
#!/bin/bash
ln -sf "/usr/local/enode/${packageName}/noxis-cli" "/usr/bin/noxis-cli"
ln -sf "/usr/local/enode/${packageName}/noxis-rs" "/usr/bin/noxis-rs"
systemctl daemon-reload
systemctl start ${packageName}.service
EOF
chmod +x ${targetDir}/package/DEBIAN/postinst
cat > ${targetDir}/package/lib/systemd/system/${packageName}.service <<EOF
[Unit]
Description=Noxis Service
After=network.target
[Service]
ExecStart=/usr/local/enode/${packageName}/noxis-rs
Restart=always
[Install]
WantedBy=multi-user.target
EOF
dpkg-deb --build ${targetDir}/package ${targetDir}/rc/${arch}/${packageName}_${version}_${arch}.deb
echo "${packageName}_${version}_${arch}.deb created successfully!"
                        """
                    }
                    createDebPackage("amd64", targetDirAmd, env.WORKSPACE, env.INSTALL_SIZE_AMD)
                    createDebPackage("riscv64", targetDirRisc, env.WORKSPACE, env.INSTALL_SIZE_RISC)
                    env.DEB_PATH_AMD64 = "${env.WORKSPACE}/rc/amd64/${packageName}_${version}_amd64.deb"
                    env.DEB_PATH_RISCV64 = "${env.WORKSPACE}/rc/riscv64/${packageName}_${version}_riscv64.deb"
                }
            }
        }
        stage('Transfer Binaries') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('feature/') }
            }
            steps {
                script {
                    echo "Transferring binaries packages to remote machine..."
                    withCredentials([usernamePassword(credentialsId: 'ift', passwordVariable: 'SSH_PASS', usernameVariable: 'SSH_USER')]) {
                        def targetDir = "${env.WORKSPACE}/${env.CHANGE_BRANCH}"
                        def remote = [:]
                        remote.name = "remote-server"
                        remote.host = "192.168.2.33"
                        remote.user = SSH_USER
                        remote.password = SSH_PASS
                        remote.allowAnyHosts = true
                        sshPut remote: remote, from: "${targetDir}", into: "/home/user/deployments/"
                        echo "Binaries successfully transferred to remote machine."
                    }
                }
            }
        }
        stage('Upload Debs to Repository') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('rc') }
            }
            steps {
                script {
                    echo "Uploading deb packages to remote repository..."
                    withCredentials([usernamePassword(credentialsId: 'prod', passwordVariable: 'SSH_PASS', usernameVariable: 'SSH_USER')]) {
                        def remote = [:]
                        remote.name = "remote-server"
                        remote.host = "192.168.2.99"
                        remote.user = SSH_USER
                        remote.password = SSH_PASS
                        remote.allowAnyHosts = true
                        echo "Uploading deb packages using sshPut..."
                        sshPut remote: remote, from: "${env.DEB_PATH_AMD64}", into: "/home/user/repo/debs/"
                        sshPut remote: remote, from: "${env.DEB_PATH_RISCV64}", into: "/home/user/repo/debs/"
                        echo "Running repository update commands via sshCommand..."
                        sshCommand remote: remote, command: '''
                            export DEBIAN_FRONTEND=noninteractive
                            cd /home/user/repo/debs/
                            for deb in *.deb; do
                                reprepro -b /var/www/deb/debian/ includedeb stable $deb
                            done
                            rm -f *.deb
                        '''
                        echo "Deb packages successfully uploaded and added to the repository!"
                    }
                }
            }
        }
    }
    post {
        always {
            script {
                echo "Cleaning up workspace..."
                try {
                    if (fileExists("${env.WORKSPACE}/package/")) {
                        sh "rm -rf ${env.WORKSPACE}/package/"
                    }
                    if (fileExists("${env.WORKSPACE}/rc/")) {
                        sh "rm -rf ${env.WORKSPACE}/rc/"
                    }
                    sh "docker stop e-monitor && docker rm e-monitor"
                } catch (Exception e) {
                    echo "Failed to clean up workspace: ${e}"
                }
            }
        }
        success {
            script {
                // FIX: `when {}` is a stage-level directive and is not valid inside a
                // post/script block (it would be treated as an undefined Groovy method
                // call and fail at runtime). Guard the rc-only auto-merge with a plain if.
                if (env.CHANGE_BRANCH?.startsWith('rc')) {
                    echo "Attempting to merge PR ${env.CHANGE_ID} into master..."
                    withCredentials([usernamePassword(credentialsId: 'gitea_creds', usernameVariable: 'GITEA_USER', passwordVariable: 'GITEA_PASS')]) {
                        def prId = env.CHANGE_ID
                        sh """
                            curl -X POST \
                                -u "${GITEA_USER}:${GITEA_PASS}" \
                                -H "Content-Type: application/json" \
                                -d '{"Do":"merge"}' \
                                http://git.entcor/api/v1/repos/VladislavD/runner-rs/pulls/${prId}/merge
                        """
                        echo "PR ${prId} merged successfully into master!"
                    }
                }
            }
        }
        failure {
            echo "Pipeline failed. Check the logs for details."
        }
        aborted {
            echo "Pipeline was aborted."
        }
    }
}

View File

@ -1,16 +1,13 @@
# noxis-rs # runner-rs ( with amd64 and riscv64 support )
![Logo](logo.png) ![Logo](https://blog.desdelinux.net/wp-content/uploads/2023/07/rust-logo.png)
### In-container integrating util to handle processes runtime in-container integrating util to handle processes runtime
( with amd64 and riscv64 support )
## Depends on ## Depends on
- `rustup (>=1.27.1)` - `rustup (>=1.27.1)`
- `gcc-riscv64-unknown-elf` - `gcc-riscv64-unknown-elf`
- `build-essential` - `build-essential`
- `gcc-riscv64-linux-gnu`
- `binutils-riscv64-linux-gnu`
## Setting up ## Setting up
Download and execute rustup.sh Download and execute rustup.sh
@ -32,7 +29,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
cd runner-rs/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu cd runner-rs/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu
~~~ ~~~
> [!NOTE] > [!NOTE]
> Cargo is configured to build an app for amd64/linux defaultly. RISCV-based compilation is optional. > Cargo is configured to build an app for amd64/linux defaultly. RISC-based compilation is optional.
3.1. Release build of app for amd64/linux 3.1. Release build of app for amd64/linux

View File

@ -1,75 +0,0 @@
#!/bin/bash
# Build the Docker image and extract the release binaries for one architecture.
# Usage: ./build.sh <arch>
# Supported architectures: amd64, riscv64

if [ -z "$1" ]; then
    echo "Ошибка: Необходимо указать архитектуру (например, amd64 или riscv64)."
    exit 1
fi

ARCH="$1"
TARGET_DIR="release/${ARCH}"
CONTAINER_NAME="e-monitor"
SUPPORTED_ARCHS=("amd64" "riscv64")

if [[ ! " ${SUPPORTED_ARCHS[@]} " =~ " ${ARCH} " ]]; then
    echo "Ошибка: Неизвестная архитектура $ARCH. Допустимые значения: ${SUPPORTED_ARCHS[*]}."
    exit 1
fi

# Map the user-facing architecture name to the cargo target triple inside the
# image; this removes the previously duplicated per-arch copy logic.
case "$ARCH" in
    amd64)   RUST_TARGET="x86_64-unknown-linux-gnu" ;;
    riscv64) RUST_TARGET="riscv64gc-unknown-linux-gnu" ;;
esac

# Best-effort removal of a leftover container with the same name.
docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"

echo "Building Docker image..."
docker build --network=host -t e-monitor . || {
    echo "Ошибка: Не удалось построить Docker-образ."
    exit 1
}

echo "Running Docker container..."
docker run --name "$CONTAINER_NAME" --dns 8.8.8.8 --network=host e-monitor:latest || {
    echo "Ошибка: Не удалось запустить Docker-контейнер."
    exit 1
}

echo "Creating target directory: $TARGET_DIR"
mkdir -p "$TARGET_DIR"

echo "Copying binaries for architecture: $ARCH"
for BIN in noxis-cli noxis-rs; do
    docker cp "$CONTAINER_NAME:/usr/src/kii/target/${RUST_TARGET}/release/${BIN}" "$TARGET_DIR/" || {
        echo "Ошибка: Не удалось скопировать ${BIN} для ${ARCH}."
        docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"
        exit 1
    }
done

echo "Stopping and removing Docker container..."
docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"

echo "Build and extraction completed successfully for architecture: $ARCH"
exit 0

BIN
logo.png

Binary file not shown.

Before

Width:  |  Height:  |  Size: 152 KiB

View File

@ -1,12 +0,0 @@
# Manifest for noxis-cli: the thin TCP client binary that parses commands with
# clap and ships them to the local noxis-rs daemon.
[package]
name = "noxis-cli"
version = "0.2.4"
edition = "2021"

[dependencies]
anyhow = "1.0.94"
clap = { version = "4.5.22", features = ["derive"] }
serde = { version = "1.0.215", features = ["derive"] }
serde_json = "1.0.133"
thiserror = "2.0.11"
tokio = { version = "1.42.0", features = ["full", "net"] }

View File

@ -1,145 +0,0 @@
use clap::{Parser, Subcommand};
// Root of the noxis-cli command line. The parsed value is serialized with
// serde_json and sent over TCP to the noxis-rs daemon (see cli_net), hence
// the serde derives alongside clap's Parser.
// NOTE: plain `//` comments are used on purpose — `///` doc comments would be
// picked up by clap derive and change the generated help output.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct Cli {
    #[command(
        subcommand,
        help = "to manage Noxis work",
    )]
    // The chosen subcommand; private because the struct is shipped as a whole.
    command : Commands,
}

// Top-level daemon-management subcommands.
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
pub enum Commands {
    #[command(
        about = "To get info about current Noxis status",
    )]
    Status,
    #[command(
        about = "To start Noxis process",
    )]
    // Carries optional start flags via `--with-flags`.
    Start(StartAction),
    #[command(
        about = "To stop Noxis process",
    )]
    Stop,
    #[command(
        about = "To restart Noxis process",
    )]
    // Reuses the same flag payload as Start.
    Restart(StartAction),
    #[command(
        about = "To get list of processes that are being monitoring",
    )]
    Processes,
    // per-process management command
    #[command(
        about = "To manage current process that is being monitoring",
    )]
    Process(ProcessCommand),
    // configuration management command
    #[command(
        about = "To manage config settings",
    )]
    Config(ConfigCommand),
}
// Arguments for `start`/`restart`: optional space-separated flags forwarded to
// the daemon via `--with-flags a b c`.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct StartAction {
    #[arg(
        long="with-flags",
        num_args = 1..,
        value_delimiter = ' '
    )]
    flags : Vec<String>,
}

// Wrapper for the `config` subcommand tree.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct ConfigCommand {
    #[command(subcommand)]
    action : ConfigAction,
}

// Configuration-management actions.
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
pub enum ConfigAction {
    #[command(
        about = "To change current Noxis configuration",
    )]
    Local(LocalConfig),
    #[command(
        about = "To change credentials of the remote config server",
    )]
    Remote,
    #[command(
        about = "To reset all config settings",
    )]
    Reset,
}

// Payload for `config local`: either a path to a config file or, with the
// --json flag, an inline JSON config string.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct LocalConfig {
    // flag: interpret `config` as inline JSON instead of a path
    #[arg(
        long = "json",
        action,
        help = "to read following input as JSON",
    )]
    is_json : bool,
    // positional value
    #[arg(
        help = "path to config file or config String (with --json flag)",
    )]
    config : String,
}
// `process <name> <action>`: targets one monitored process by name.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct ProcessCommand {
    // positional: which monitored process to act on
    #[arg(
        help = "name of needed process",
    )]
    process : String,
    #[command(
        subcommand,
        help = "To get current process's status",
    )]
    action : ProcessAction,
}

// Actions applicable to a single monitored process.
// NOTE(review): "hybernaze" in the about strings below is a typo for
// "hibernate", but the strings are user-visible help text, so they are left
// unchanged here.
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
enum ProcessAction {
    #[command(
        about = "To get info about current process status",
    )]
    Status,
    #[command(
        about = "To start current process",
    )]
    Start,
    #[command(
        about = "To stop current process",
    )]
    Stop,
    #[command(
        about = "To freeze (hybernaze) current process",
    )]
    Freeze,
    #[command(
        about = "To unfreeze (unhybernaze) current process",
    )]
    Unfreeze,
    #[command(
        about = "To restart current process",
    )]
    Restart,
    #[command(
        about = "To get info about current process's dependencies",
    )]
    Deps,
    #[command(
        about = "To get info about current process's files-dependencies",
    )]
    Files,
    #[command(
        about = "To get info about current process's services-dependencies",
    )]
    Services,
}

View File

@ -1,14 +0,0 @@
use thiserror::Error;
use super::cli_net::NOXIS_RS_CREDS;

// Errors the CLI can hit while talking to the noxis-rs daemon over TCP.
#[derive(Debug, Error)]
pub enum NoxisCliError {
    // TCP connect to NOXIS_RS_CREDS failed (daemon down or unreachable).
    #[error("Can't send any data to {:?}. Noxis-rs daemon is disabled or can't be accessed", NOXIS_RS_CREDS)]
    NoxisDaemonMissing,
    // The connected socket never became writable.
    #[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's web-functionality")]
    PortIsNotWritable,
    // write_all of the serialized command failed mid-send.
    #[error("Can't send Cli-prompt to the Noxis-rs. Check it's state")]
    CliPromptCanNotBeSent,
    // serde_json::to_string of the Cli struct failed.
    // NOTE(review): variant name contains a duplicated "Parsing"; renaming
    // would break the match sites in cli_net, so it is kept as-is.
    #[error("Can't parse CLI struct and send as byte stream")]
    ToStringCliParsingParsing,
}

View File

@ -1,32 +0,0 @@
use tokio::net::TcpStream;
use tokio::io::AsyncWriteExt;
use tokio::time::{Duration, sleep};
use anyhow::Result;
use super::Cli;
use super::cli_error::NoxisCliError;
/// Address of the local noxis-rs daemon's CLI listener.
pub const NOXIS_RS_CREDS: &str = "127.0.0.1:7753";

/// Opens a TCP connection to the local noxis-rs daemon.
///
/// # Errors
/// Returns `NoxisCliError::NoxisDaemonMissing` (wrapped in `anyhow::Error`)
/// when nothing is listening on `NOXIS_RS_CREDS`.
pub async fn create_tcp_stream() -> Result<TcpStream> {
    Ok(TcpStream::connect(NOXIS_RS_CREDS).await.map_err(|_| NoxisCliError::NoxisDaemonMissing)?)
}

/// Serializes the parsed CLI command as JSON and writes it to the daemon socket.
///
/// Polls every 100 ms until the stream reports itself writable, then performs
/// a single `write_all` and returns.
///
/// # Errors
/// `NoxisDaemonMissing` when `stream` carries a connect failure,
/// `ToStringCliParsingParsing` on serialization failure,
/// `CliPromptCanNotBeSent` when the write fails.
pub async fn try_send(stream: Result<TcpStream>, params: Cli) -> Result<()> {
    use serde_json::to_string;
    let mut stream = stream.map_err(|_| NoxisCliError::NoxisDaemonMissing)?;
    // Back off briefly while the socket is not yet writable.
    // (Dead commented-out experiments removed from the loop body.)
    while stream.writable().await.is_err() {
        sleep(Duration::from_millis(100)).await;
    }
    let msg = to_string(&params).map_err(|_| NoxisCliError::ToStringCliParsingParsing)?;
    stream
        .write_all(msg.as_bytes())
        .await
        .map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?;
    Ok(())
}

View File

@ -1,5 +0,0 @@
// Crate facade for noxis-cli: networking and error submodules stay private;
// only the clap-derived command types from `cli` are re-exported so sibling
// crates (e.g. noxis-rs) can deserialize incoming commands.
mod cli;
mod cli_net;
mod cli_error;
pub use cli::*;

View File

@ -1,15 +0,0 @@
mod cli;
mod cli_net;
mod cli_error;
use clap::Parser;
use cli::Cli;
use cli_net::{create_tcp_stream, try_send};
use anyhow::Result;
/// Entry point: parse the command line and forward the parsed command to the
/// local noxis-rs daemon over TCP.
#[tokio::main]
async fn main() -> Result<()> {
    let parsed = Cli::parse();
    let connection = create_tcp_stream().await;
    try_send(connection, parsed).await?;
    Ok(())
}

View File

@ -1,20 +0,0 @@
# Manifest for noxis-rs: the monitoring daemon (process/file/service watchers,
# CLI TCP listener, Redis-backed remote config).
[package]
name = "noxis-rs"
version = "0.11.10"
edition = "2021"

[dependencies]
anyhow = "1.0.93"
chrono = "0.4.38"
clap = { version = "4.5.21", features = ["derive"] }
env_logger = "0.11.3"
inotify = "0.10.2"
log = "0.4.22"
pcap = "2.2.0"
redis = "0.25.4"
serde = { version = "1.0.203", features = ["derive"] }
serde_json = "1.0.118"
sysinfo = "0.32.0"
tokio = { version = "1.38.0", features = ["full", "time"] }
# Sibling crate providing the Cli command types deserialized from the socket.
noxis-cli = { path = "../noxis-cli" }
dotenv = "0.15.0"

View File

@ -1,106 +0,0 @@
use log::{error, info, warn};
use tokio::net::{TcpListener, TcpStream};
use anyhow::{Result as DynResult, Error};
use tokio::time::{sleep, Duration};
use std::{borrow::BorrowMut, net::{IpAddr, Ipv4Addr}};
// use std::io::BufReader;
use tokio::io::{BufReader, AsyncWriteExt, AsyncBufReadExt};
use noxis_cli::Cli;
use serde_json::from_str;
/// # Fn `init_cli_pipeline`
/// ## for catching all input requests from CLI
///
/// *input* : -
///
/// *output* : `anyhow::Result<()>` to wrap errors; only ever returns on a
/// bind failure — the accept loop itself never exits normally
///
/// *initiator* : fn `main`
///
/// *managing* : `TcpListener` object to handle requests
///
/// *depends on* : fn `init_listener`, fn `process_connection`
///
pub async fn init_cli_pipeline() -> DynResult<()> {
    match init_listener().await {
        Some(list) => {
            loop {
                if let Ok((socket, addr)) = list.accept().await {
                    // isolation: only loopback clients may drive the daemon
                    if IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)) != addr.ip() {
                        warn!("Declined attempt to connect TCP-socket from {}", addr);
                        continue;
                    }
                    // connections are handled one at a time, sequentially
                    process_connection(socket).await;
                }
                // NOTE(review): this 500 ms pause runs after *every* iteration,
                // so it also throttles how quickly successive connections are
                // accepted — confirm that is intended.
                sleep(Duration::from_millis(500)).await;
            }
        },
        None => Err(Error::msg("Addr 127.0.0.1:7753 is already in use"))
    }
}
/// # Fn `init_listener`
/// ## for creating TCP-listener for communicating with CLI
///
/// *input* : -
///
/// *output* : `Some(TcpListener)` when port 7753 could be bound, `None` otherwise
///
/// *initiator* : fn `init_cli_pipeline`
///
/// *managing* : `TcpListener` object to handle requests
///
/// *depends on* : `tokio::net::TcpListener`
///
async fn init_listener() -> Option<TcpListener> {
    if let Ok(listener) = TcpListener::bind("127.0.0.1:7753").await {
        info!("Runner is listening localhost:7753");
        return Some(listener);
    }
    error!("Cannot create TCP listener for CLI");
    None
}
/// # Fn `process_connection`
/// ## for processing input CLI requests
///
/// *input* : mut stream: `TcpStream`
///
/// *output* : -
///
/// *initiator* : fn `init_cli_pipeline`
///
/// *managing* : mutable object of `TcpStream`
///
/// *depends on* : `tokio::net::TcpStream`
///
async fn process_connection(mut stream: TcpStream) {
let buf_reader = BufReader::new(stream.borrow_mut());
let mut rqst = buf_reader.lines();
while let Ok(Some(line)) = rqst.next_line().await {
if line.is_empty() {
break
}
match from_str::<Cli>(&line) {
Ok(req) => {
// TODO: func wrapper
dbg!(req);
},
Err(_) => {
break
},
}
println!("{}", line);
}
let response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\nContent-Type: text/plain\r\n\r\nHello, World!";
stream.write_all(response.as_bytes()).await.unwrap();
}

View File

@ -1,382 +0,0 @@
// module to handle pre-boot params of the monitor
#[allow(unused_imports)]
use anyhow::{Result, Ok, Error};
use clap::Parser;
use std::path::PathBuf;
use std::env::var;
use dotenv::dotenv;
const SOCKET_PATH: &str = "/var/run/enode/hostagent.sock";
/// Names of all NOXIS_* environment variables mirrored from preboot params.
enum EnvVars {
    NoxisNoHagent,
    NoxisNoLogs,
    NoxisRefreshLogs,
    NoxisNoRemoteConfig,
    NoxisNoConfigSub,
    NoxisSocketPath,
    NoxisLogTo,
    NoxisRemoteServerUrl,
    NoxisConfig,
    NoxisMetrics,
}

/// Renders each variant as its literal environment-variable name.
impl std::fmt::Display for EnvVars {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            EnvVars::NoxisNoHagent => write!(f, "NOXIS_NO_HAGENT"),
            EnvVars::NoxisNoLogs => write!(f, "NOXIS_NO_LOGS"),
            EnvVars::NoxisRefreshLogs => write!(f, "NOXIS_REFRESH_LOGS"),
            EnvVars::NoxisNoRemoteConfig => write!(f, "NOXIS_NO_REMOTE_CONFIG"),
            EnvVars::NoxisNoConfigSub => write!(f, "NOXIS_NO_CONFIG_SUB"),
            EnvVars::NoxisSocketPath => write!(f, "NOXIS_SOCKET_PATH"),
            EnvVars::NoxisLogTo => write!(f, "NOXIS_LOG_TO"),
            EnvVars::NoxisRemoteServerUrl => write!(f, "NOXIS_REMOTE_SERVER_URL"),
            EnvVars::NoxisConfig => write!(f, "NOXIS_CONFIG"),
            EnvVars::NoxisMetrics => write!(f, "NOXIS_METRICS"),
        }
    }
}

/// Seeding/overriding the process environment from preboot params.
impl<'a> EnvVars {
    // Built-in fallback value per variable.
    // Default trait func is not satisfying this issue (variants map to &str,
    // not to a single Self value).
    fn default(self) -> &'a str {
        match self {
            EnvVars::NoxisNoHagent => "false",
            EnvVars::NoxisNoLogs => "false",
            EnvVars::NoxisRefreshLogs => "false",
            EnvVars::NoxisNoRemoteConfig => "false",
            EnvVars::NoxisNoConfigSub => "false",
            EnvVars::NoxisSocketPath => "/var/run/enode/hostagent.sock",
            EnvVars::NoxisLogTo => "./",
            EnvVars::NoxisRemoteServerUrl => "localhost",
            EnvVars::NoxisConfig => "./settings.json",
            EnvVars::NoxisMetrics => "full",
        }
    }
    // Sets the variable to `preboot_value` when unset; when it is already set
    // to a *different* value it is reset to the built-in default.
    // NOTE(review): resetting to `self.default()` rather than `preboot_value`
    // contradicts the "save preboot states in env vars if not equal" comment
    // in `setup` — confirm which value is supposed to win here.
    fn process_env_var(self, preboot_value: &str) {
        match var(self.to_string()) {
            std::result::Result::Ok(val) => {
                if val != preboot_value {
                    std::env::set_var(self.to_string(), self.default());
                }
            },
            Err(_) => {
                std::env::set_var(self.to_string(), preboot_value);
            },
        }
    }
    // Mirrors every validated preboot param into the process environment.
    pub fn setup(preboot: &PrebootParams) {
        // setup default if not exists
        // check values and save preboot states in env vars if not equal
        Self::NoxisNoHagent.process_env_var(&preboot.no_hostagent.to_string());
        Self::NoxisNoLogs.process_env_var(&preboot.no_logs.to_string());
        Self::NoxisRefreshLogs.process_env_var(&preboot.refresh_logs.to_string());
        Self::NoxisNoRemoteConfig.process_env_var(&preboot.no_remote_config.to_string());
        Self::NoxisNoConfigSub.process_env_var(&preboot.no_sub.to_string());
        Self::NoxisSocketPath.process_env_var(preboot.socket_path.to_str().unwrap());
        Self::NoxisLogTo.process_env_var(preboot.log_to.to_str().unwrap());
        Self::NoxisRemoteServerUrl.process_env_var(&preboot.remote_server_url);
        Self::NoxisConfig.process_env_var(preboot.config.to_str().unwrap());
        Self::NoxisMetrics.process_env_var(&preboot.metrics.to_string());
    }
}
/// # Enum `MetricsPrebootParams`
/// ## for setting up metrics mode as preboot param from command prompt
///
/// examples:
/// ``` bash
/// noxis-rs ... --metrics full
/// noxis-rs ... --metrics system
/// noxis-rs ... --metrics processes
/// noxis-rs ... --metrics net
/// noxis-rs ... --metrics none
/// ```
///
#[derive(clap::ValueEnum, Debug, Clone)]
pub enum MetricsPrebootParams {
    Full,
    System,
    Processes,
    Net,
    None,
}

/// # `std::fmt::Display` implementation for `MetricsPrebootParams`
/// ## renders the mode back to the exact lowercase token accepted on the
/// command line, so the value round-trips through env vars and defaults.
impl std::fmt::Display for MetricsPrebootParams {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let token = match self {
            MetricsPrebootParams::Full => "full",
            MetricsPrebootParams::System => "system",
            MetricsPrebootParams::Processes => "processes",
            MetricsPrebootParams::Net => "net",
            MetricsPrebootParams::None => "none",
        };
        write!(f, "{}", token)
    }
}
/// # struct `PrebootParams`
/// ## to parse and set up all modes as preboot params from command prompt
///
/// ### args :
///
/// `--no-hagent` - to disable hagent work module and set up work mode as autonomous
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-hagent ...
/// ```
///
/// `--no-logs` - to disable logging at all
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-logs ...
/// ```
///
/// `--refresh-logs` - to truncate logs directory
/// ### usage :
/// ``` bash
/// noxis-rs ... --refresh-logs ...
/// ```
///
/// `--no-remote-config` - to disable work with Redis as config producer
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-remote-config ...
/// ```
///
/// `--no-sub` - to disable Redis subscription mechanism
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-sub ...
/// ```
///
/// `--socket-path` - to set Unix Domain Socket file's directory
/// ### usage :
/// ``` bash
/// noxis-rs ... --socket-path /var/run/enode/hostagent.sock ...
/// ```
///
/// `--log-to` - to set directory for logs
/// ### usage :
/// ``` bash
/// noxis-rs ... --log-to /dir/to/logs/ ...
/// ```
///
/// `--remote-server-url` - to set Redis Server
/// ### usage :
/// ``` bash
/// noxis-rs ... --remote-server-url 192.168.28.12 ...
/// ```
///
/// `--config` - to set Noxis' config full path
/// ### usage :
/// ``` bash
/// noxis-rs ... --config /etc/enode/settings.json ...
/// ```
///
/// `--metrics` - to set metrics mode
/// ### usage :
/// ``` bash
/// noxis-rs ... --metrics full ...
/// ```
#[derive(Debug, Parser)]
pub struct PrebootParams {
    // boolean action flags
    #[arg(
        long = "no-hagent",
        action,
        conflicts_with="socket_path",
        help="To disable work with host-agent"
    )]
    pub no_hostagent : bool,
    #[arg(
        long = "no-logs",
        action,
        conflicts_with="log_to",
        help="To disable logs"
    )]
    pub no_logs: bool,
    #[arg(
        long = "refresh-logs",
        action,
        conflicts_with="no_logs",
        help="To clear logs directory"
    )]
    pub refresh_logs : bool,
    #[arg(
        long = "no-remote-config",
        action,
        help="To disable work with remote config server",
        conflicts_with="no_sub")]
    pub no_remote_config : bool,
    #[arg(
        long = "no-sub",
        action,
        help="To disable subscription mechanism",
        conflicts_with="no_remote_config")]
    pub no_sub : bool,
    // value params (socket_path, log_to, remote_server_url, config)
    #[arg(
        long = "socket-path",
        default_value="/var/run/enode/hostagent.sock",
        conflicts_with="no_hostagent",
        help="To set .sock file's path used in communication with host-agent"
    )]
    pub socket_path : PathBuf,
    #[arg(
        long = "log-to",
        default_value="./",
        conflicts_with="no_logs",
        help="To set a path to logs directory"
    )]
    pub log_to : PathBuf,
    #[arg(
        long = "remote-server-url",
        default_value="localhost",
        conflicts_with="no_remote_config",
        help = "To set url of remote config server using in remote config pulling mechanism"
    )]
    pub remote_server_url : String,
    #[arg(
        long = "config",
        short,
        default_value="settings.json",
        help="To set local config file path"
    )]
    pub config : PathBuf,
    // value-enum params (metrics)
    #[arg(
        long = "metrics",
        short,
        default_value_t=MetricsPrebootParams::Full,
        help="To set metrics grubbing mode"
    )]
    pub metrics: MetricsPrebootParams,
}
/// # implementation for `PrebootParams`
/// ## to enable validation mechanism
impl PrebootParams {
    /// Normalizes the parsed preboot params against the actual environment:
    /// falls back to default socket/log/config paths when the given ones are
    /// missing, may disable the hostagent module, and finally mirrors the
    /// validated values into NOXIS_* env vars via `EnvVars::setup`.
    pub fn validate(mut self) -> Result<Self> {
        dotenv().ok();
        // Socket file: missing default path => run without hostagent;
        // missing custom path => try the default, else run without hostagent.
        if !self.socket_path.exists() && !self.no_hostagent {
            if self.socket_path.to_string_lossy() == SOCKET_PATH {
                self.no_hostagent = true;
                eprintln!("Warning: Socket-file wasn't found. Working without hostagent module...");
            } else {
                eprintln!("Warning: Socket-file wasn't found or Noxis can't read it. Socket-file was set to default");
                if !PathBuf::from(SOCKET_PATH).exists() {
                    self.no_hostagent = true;
                    eprintln!("Warning: Socket-file wasn't found. Working without hostagent module...");
                } else {
                    self.socket_path = PathBuf::from(SOCKET_PATH);
                }
            }
        }
        // Log directory must exist unless logging is disabled.
        if !self.log_to.exists() && !self.no_logs {
            eprintln!("Error: Log-Dir not found or Noxis can't read it. LogDir was set to default");
            self.log_to = PathBuf::from("./");
        }
        // Config fallback chain: given path -> /etc/settings.json -> ./settings.json.
        if !self.config.exists() {
            // Fix: the old message claimed "Invalid character in config file",
            // but this branch means the file was not found/readable.
            eprintln!("Error: Config file not found or Noxis can't read it. Config path was set to default");
            let fallback = PathBuf::from("/etc/settings.json");
            if fallback.exists() {
                // Fix: actually use the fallback that was just found — the old
                // code checked it but then unconditionally pointed the config
                // at a (still missing) local settings.json.
                self.config = fallback;
            } else {
                if self.no_remote_config {
                    return Err(Error::msg("Noxis cannot run without config. Create local config or enable remote-config mechanism"));
                }
                self.config = PathBuf::from("settings.json");
            }
        }
        // Mirror the validated values into the process environment.
        EnvVars::setup(&self);
        Ok(self)
    }
}
// unit tests of the preboot-params parsing mechanism (clap derive + conflicts)
#[cfg(test)]
mod preboot_unitests {
    use super::*;

    // No arguments at all must parse (every param has a default).
    #[test]
    fn parsing_zero_args() {
        assert!(PrebootParams::try_parse_from(vec!["runner-rs"]).is_ok())
    }

    // --socket-path alone is valid.
    #[test]
    fn parsing_hagent_valid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--socket-path", "/path/to/socket"
        ]).is_ok())
    }

    // --socket-path conflicts with --no-hagent.
    #[test]
    fn parsing_hagent_invalid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--socket-path", "/path/to/socket",
            "--no-hagent"
        ]).is_err())
    }

    // --log-to alone is valid.
    #[test]
    fn parsing_log_valid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--log-to", "/path/to/log/dir"
        ]).is_ok())
    }

    // --log-to conflicts with --no-logs.
    #[test]
    fn parsing_log_invalid_args() {
        // Fix: flag and value were previously passed as one argv token
        // ("--log-to /path/..."), so the test passed because of an
        // unknown-argument error rather than the intended conflict.
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--log-to", "/path/to/log/dir",
            "--no-logs"
        ]).is_err())
    }

    // --no-sub together with --remote-server-url is valid.
    #[test]
    fn parsing_config_valid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--no-sub",
            "--remote-server-url", "redis://127.0.0.1"
        ]).is_ok())
    }

    // --no-remote-config conflicts with --no-sub.
    #[test]
    fn parsing_config_invalid_args_noremote_nosub() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--no-remote-config", "--no-sub"
        ]).is_err())
    }

    // --no-remote-config conflicts with --remote-server-url.
    #[test]
    fn parsing_config_invalid_args_noremote_remoteurl() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--no-remote-config",
            "--remote-server-url", "redis://127.0.0.1"
        ]).is_err())
    }

    // --metrics accepts exactly the ValueEnum tokens and rejects anything else.
    #[test]
    fn parsing_metrics_args_using_value_enum() {
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "full"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "system"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "processes"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "net"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "none"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "unusual_value"]).is_err());
    }
}

View File

@ -1,272 +0,0 @@
pub mod files;
pub mod hagent;
pub mod metrics;
pub mod prcs;
pub mod services;
// TODO : saving current flags state
use crate::options::structs::CustomError;
use crate::options::structs::TrackingProcess;
use files::create_watcher;
use files::file_handler;
use inotify::Inotify;
use log::{error, warn};
use prcs::{
freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process,
unfreeze_process,
};
use services::service_handler;
use std::process::Command;
use std::sync::Arc;
use tokio::join;
use tokio::sync::mpsc;
use tokio::time::Duration;
const GET_ID_CMD: &str = "hostname";
/// # Fn `run_daemons`
/// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to given messages into channel
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `&mut mpsc::Receiver<u8>`,
///
/// *output* : ()
///
/// *initiator* : main thread
///
/// *managing* : Arc to current process struct, Arc to managing channel writer, mut ref to managing channel reader
///
/// *depends on* : all module `prcs`'s functions, fn `running_handler`, fn `utils::files::create_watcher`
///
/// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process
///
pub async fn run_daemons(
    proc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    rx: &mut mpsc::Receiver<u8>,
) {
    // One inotify watcher per file dependency; a creation failure is reported
    // through the protocol channel (code 121) and handled like any other event.
    let mut watchers: Vec<Inotify> = vec![];
    for file in proc.dependencies.files.clone().into_iter() {
        if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
            watchers.push(watcher);
        } else {
            let _ = tx.send(121).await;
        }
    }
    let watchers_clone: Arc<tokio::sync::Mutex<Vec<Inotify>>> =
        Arc::new(tokio::sync::Mutex::new(watchers));
    loop {
        let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone());
        tokio::select! {
            _ = run_hand => continue,
            maybe_msg = rx.recv() => {
                match maybe_msg {
                    // Bug fix: the previous `.unwrap()` panicked when every sender
                    // was dropped; a closed channel now shuts the daemon down cleanly.
                    None => return,
                    Some(code) => {
                        if process_protocol_symbol(proc.clone(), code).await.is_err() {
                            return;
                        }
                    }
                }
            },
        }
        tokio::task::yield_now().await;
    }
}
/// # Fn `process_protocol_symbol`
/// ## maps one control-channel message code onto an action for the tracked process
///
/// *input* : `Arc<TrackingProcess>` (process under management), `u8` protocol code
///
/// *output* : `Ok(())` to keep the managing loop alive, `Err(CustomError::Fatal)` to stop it
///
/// *initiator* : fn `run_daemons`
///
/// *depends on* : module `prcs`'s process-control functions
///
/// Protocol codes (emitted by the file/service handlers):
/// 1/2 file-dependency error -> terminate / freeze; 3 start failure (fatal);
/// 4/5/6 service-dependency timeout -> stay / terminate / freeze;
/// 7/8/9 file changed -> terminate (fatal) / restart / stay;
/// 10|11 unfreeze request; 101 bad trigger values in JSON (fatal);
/// 121 watcher creation failure (fatal); 111 global shutdown of the child.
async fn process_protocol_symbol(proc: Arc<TrackingProcess>, val: u8) -> Result<(), CustomError>{
    match val {
        // 1 - File-dependency handling error -> terminating (after waiting)
        1 => {
            if is_active(&proc.name).await {
                error!("File-dependency handling error: Terminating {} process ..." , &proc.name);
                terminate_process(&proc.name).await;
                // brief grace period so the kill takes effect before the next check
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
            // return;
        },
        // 2 - File-dependency handling error -> holding (after waiting)
        2 => {
            if !is_frozen(&proc.name).await {
                error!("File-dependency handling error: Freezing {} process ..." , &proc.name);
                freeze_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(100)).await;
            }
        },
        // 3 - Running process error
        3 => {
            error!("Error due to starting {} process", &proc.name);
            return Err(CustomError::Fatal)
        },
        // 4 - Timeout of waiting service-dependency -> staying (after waiting)
        4 => {
            // warn!("Timeout of waiting service-dependency: Ignoring on {} process ..." , &proc.name);
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 5 - Timeout of waiting service-dependency -> terminating (after waiting)
        5 => {
            if is_active(&proc.name).await {
                error!("Timeout of waiting service-dependency: Terminating {} process ..." , &proc.name);
                terminate_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
        },
        // 6 - Timeout of waiting service-dependency -> holding (after waiting)
        6 => {
            // println!("holding {}-{}", proc.name, is_active(&proc.name).await);
            if !is_frozen(&proc.name).await {
                error!("Timeout of waiting service-dependency: Freezing {} process ..." , &proc.name);
                freeze_process(&proc.name).await;
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        },
        // // 7 - File-dependency change -> terminating (after check)
        7 => {
            error!("File-dependency warning (file changed). Terminating {} process...", &proc.name);
            terminate_process(&proc.name).await;
            tokio::time::sleep(Duration::from_millis(100)).await;
            return Err(CustomError::Fatal)
        },
        // // 8 - File-dependency change -> restarting (after check)
        8 => {
            warn!("File-dependency warning (file changed). Restarting {} process...", &proc.name);
            let _ = restart_process(&proc.name, &proc.path).await;
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // // 9 - File-dependency change -> staying (after check)
        9 => {
            // no need to trash logs
            warn!("File-dependency warning (file changed). Ignoring event on {} process...", &proc.name);
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 10 - Process unfreaze call via file handler (or service handler)
        10 | 11 => {
            if is_frozen(&proc.name).await {
                warn!("Unfreezing process {} call...", &proc.name);
                unfreeze_process(&proc.name).await;
            }
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 11 - Process unfreaze call via service handler
        // 11 => {
        //     if is_frozen(&proc.name).await {
        //         warn!("Unfreezing process {} call...", &proc.name);
        //         unfreeze_process(&proc.name).await;
        //     }
        //     tokio::time::sleep(Duration::from_millis(100)).await;
        // },
        // 101 - Impermissible trigger values in JSON
        101 => {
            error!("Impermissible trigger values in JSON in {}'s block. Killing thread...", &proc.name);
            if is_active(&proc.name).await {
                terminate_process(&proc.name).await;
            }
            return Err(CustomError::Fatal)
        },
        //
        // 121 - Cannot create valid watcher for file dependency
        // todo : think about valid situation
        121 => {
            error!("Cannot create valid watcher for file dependency. Terminating {} process...", &proc.name);
            let _ = terminate_process(&proc.name).await;
            return Err(CustomError::Fatal)
        },
        // 111 - global thread termination with killing current child in a face
        // of a current process
        111 => {
            warn!("Terminating {}'s child processes...", &proc.name);
            match is_active(&proc.name).await {
                true => {
                    terminate_process(&proc.name).await;
                },
                false => {
                    log::info!("Process {} is already terminated!", proc.name);
                },
            }
        },
        // unknown codes are deliberately ignored
        _ => {},
    }
    Ok(())
}
// check process status daemon
/// # Fn `running_handler`
/// ## func to async exec subjobs of checking process, services and files states
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `Arc<tokio::sync::Mutex<Vec<Inotify>>>`
///
/// *output* : ()
///
/// *initiator* : fn `run_daemons`
///
/// *managing* : Arc to current process struct, Arc to Mutex to list of file watchers
///
/// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}`
///
pub async fn running_handler(
    prc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>>,
) {
    // run one pass of the file and service dependency checks concurrently
    let (files_res, services_res) = join!(
        file_handler(
            &prc.name,
            &prc.dependencies.files,
            tx.clone(),
            watchers.clone(),
        ),
        service_handler(&prc.name, &prc.dependencies.services, tx.clone()),
    );
    let deps_ok = files_res.is_ok() && services_res.is_ok();
    // dependencies satisfied + process stopped -> (re)start it;
    // a start failure is reported as protocol code 3
    if !is_active(&prc.name).await && deps_ok {
        if start_process(&prc.name, &prc.path).await.is_err() {
            tx.send(3).await.unwrap();
            return;
        }
    }
    // dependencies satisfied + process frozen -> ask the managing loop to thaw it (code 10)
    else if is_frozen(&prc.name).await && deps_ok {
        tx.send(10).await.unwrap();
        return;
    }
    tokio::task::yield_now().await;
}
// todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}'
/// # Fn `get_container_id`
/// ## for getting container id used in logs
///
/// *input* : -
///
/// *output* : Some(String) if cont-id was grabbed | None - if not
///
/// *initiator* : fn `options::logger::setup_logger`
///
/// *managing* : -
///
/// *depends on* : -
///
pub fn get_container_id() -> Option<String> {
    match Command::new(GET_ID_CMD).output() {
        Ok(output) => {
            // a non-zero exit means no usable id
            if !output.status.success() {
                return None;
            }
            // decode stdout exactly once; the previous code re-converted the
            // same bytes a second time for the return value
            let id = String::from_utf8_lossy(&output.stdout).to_string();
            // NOTE(review): `hostname` output usually carries a trailing
            // newline; callers trim it themselves (see logger), so it is
            // deliberately kept here.
            if id.is_empty() {
                None
            } else {
                Some(id)
            }
        }
        Err(_) => None,
    }
}
#[cfg(test)]
mod utils_unittests {
    use super::get_container_id;
    /// Smoke test: the host must report an id usable for log prefixes.
    #[test]
    fn check_if_container_id_can_be_grabed() {
        let id = get_container_id();
        assert!(id.is_some());
    }
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -4,12 +4,12 @@
"processes": [ "processes": [
{ {
"name": "temp-process", "name": "temp-process",
"path": "./temp-process", "path": "/home/user/monitor/runner-rs/temp-process",
"dependencies": { "dependencies": {
"files": [ "files": [
{ {
"filename": "dep-file", "filename": "dep-file",
"src": "./tests/examples/", "src": "/home/user/monitor/runner-rs/tests/examples/",
"triggers": { "triggers": {
"onDelete": "stop", "onDelete": "stop",
"onChange": "stay" "onChange": "stay"
@ -31,3 +31,4 @@
} }
] ]
} }

View File

@ -1,33 +1,27 @@
mod options; mod options;
mod utils; mod utils;
use anyhow::Error;
use clap::Parser;
use log::{error, info}; use log::{error, info};
use options::config::*; use options::config::*;
use options::logger::setup_logger; use options::logger::setup_logger;
use options::signals::set_valid_destructor; use options::signals::set_valid_destructor;
use options::structs::Processes; use options::structs::*;
use options::cli_pipeline::init_cli_pipeline;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use utils::*; use utils::*;
use options::preboot::PrebootParams;
#[tokio::main(flavor = "multi_thread")] #[tokio::main(flavor = "multi_thread")]
async fn main() -> anyhow::Result<()>{ async fn main() {
let preboot = Arc::new(PrebootParams::parse().validate()?);
let _ = setup_logger(); let _ = setup_logger();
info!("Runner is configurating..."); info!("Runner is configurating...");
// setting up redis connection \ // setting up redis connection \
// then conf checks to choose the most actual \ // then conf checks to choose the most actual \
let processes: Processes = get_actual_config(preboot.clone()).await.unwrap_or_else(|| { let processes: Processes = get_actual_config().await.unwrap_or_else(|| {
error!("No actual configuration for runner. Stopping..."); error!("No actual configuration for runner. Stopping...");
std::process::exit(1); std::process::exit(101);
}); });
info!( info!(
@ -38,7 +32,7 @@ async fn main() -> anyhow::Result<()>{
if processes.processes.is_empty() { if processes.processes.is_empty() {
error!("Processes list is null, runner-rs initialization is stopped"); error!("Processes list is null, runner-rs initialization is stopped");
return Err(Error::msg("Empty processes segment in config")); return;
} }
let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![]; let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![];
// is in need to send to the signals handler thread // is in need to send to the signals handler thread
@ -81,18 +75,13 @@ async fn main() -> anyhow::Result<()>{
// remote config update subscription // remote config update subscription
handler.push(tokio::spawn(async move { handler.push(tokio::spawn(async move {
let _ = subscribe_config_stream(Arc::new(processes), preboot.clone()).await; let _ = subscribe_config_stream(Arc::new(processes)).await;
}));
// cli pipeline
handler.push(tokio::spawn(async move {
let _ = init_cli_pipeline().await;
})); }));
for i in handler { for i in handler {
let _ = i.await; let _ = i.await;
} }
Ok(()) return;
} }
// todo: integration tests // todo: integration tests

View File

@ -4,5 +4,3 @@ pub mod config;
pub mod logger; pub mod logger;
pub mod signals; pub mod signals;
pub mod structs; pub mod structs;
pub mod preboot;
pub mod cli_pipeline;

View File

@ -1,4 +1,4 @@
use super::structs::*; use crate::options::structs::*;
use log::{error, info, warn}; use log::{error, info, warn};
use redis::{Client, Connection}; use redis::{Client, Connection};
use std::fs::OpenOptions; use std::fs::OpenOptions;
@ -7,10 +7,9 @@ use std::os::unix::process::CommandExt;
use std::process::Command; use std::process::Command;
use std::sync::Arc; use std::sync::Arc;
use std::{env, fs}; use std::{env, fs};
use super::preboot::PrebootParams; use tokio::time::Duration;
use tokio::time::{Duration, sleep};
// const CONFIG_PATH: &str = "settings.json"; const CONFIG_PATH: &str = "settings.json";
/// # Fn `load_processes` /// # Fn `load_processes`
/// ## for reading and parsing *local* storing config /// ## for reading and parsing *local* storing config
@ -47,51 +46,43 @@ fn load_processes(json_filename: &str) -> Option<Processes> {
/// ///
/// *depends on* : struct `Processes` /// *depends on* : struct `Processes`
/// ///
pub async fn get_actual_config(params : Arc<PrebootParams>) -> Option<Processes> { pub async fn get_actual_config() -> Option<Processes> {
// * if no local conf -> loop and +inf getting conf from redis server // * if no local conf -> loop and +inf getting conf from redis server
// * if local conf -> once getting conf from redis server // * if local conf -> once getting conf from redis server
let config_path = params.config.to_str().unwrap_or_else(|| { match load_processes(CONFIG_PATH) {
error!("Invalid character in config file. Config path was set to default");
"settings.json"
});
info!("Configurating config module with params: no-remote-config={}, no-sub={}, local config path={:?}, remote server={}", params.no_remote_config, params.no_sub, params.config, params.remote_server_url);
match load_processes(config_path) {
Some(local_conf) => { Some(local_conf) => {
info!( info!(
"Found local configuration, version - {}", "Found local configuration, version - {}",
&local_conf.date_of_creation &local_conf.date_of_creation
); );
if !params.no_remote_config { if let Some(remote_conf) =
if let Some(remote_conf) = // TODO : rework with pubsub mech
// TODO : rework with pubsub mech once_get_remote_configuration(&format!("redis://{}/", local_conf.config_server))
once_get_remote_configuration(&format!("redis://{}/", &params.remote_server_url)) {
{ return match config_comparing(&local_conf, &remote_conf) {
return match config_comparing(&local_conf, &remote_conf) { ConfigActuality::Local => {
ConfigActuality::Local => { info!("Local config is actual");
info!("Local config is actual"); Some(local_conf)
Some(local_conf) }
ConfigActuality::Remote => {
info!("Pulled config is more actual. Saving changes!");
if save_new_config(&remote_conf, CONFIG_PATH).is_err() {
error!("Saving changes process failed due to unexpected error...")
} }
ConfigActuality::Remote => { Some(remote_conf)
info!("Pulled config is more actual. Saving changes!"); }
if save_new_config(&remote_conf, config_path).is_err() { };
error!("Saving changes process failed due to unexpected error...")
}
Some(remote_conf)
}
};
}
} }
Some(local_conf) Some(local_conf)
} }
None => { None => {
warn!("No local valid conf was found. Trying to pull remote one..."); warn!("No local valid conf was found. Trying to pull remote one...");
if !params.no_remote_config { let mut conn = get_connection_watcher(&open_watcher("redis://localhost/"));
let mut conn = get_connection_watcher(&open_watcher(&format!("redis://{}/", &params.remote_server_url))); let remote_config = get_remote_conf_watcher(&mut conn).await;
if let Some(conf) = get_remote_conf_watcher(&mut conn).await { if let Some(conf) = remote_config {
info!("Config {} was pulled from Redis-Server. Starting...", &conf.date_of_creation); info!("Config {} was pulled from Redis-Server. Starting...", &conf.date_of_creation);
let _ = save_new_config(&conf, config_path); let _ = save_new_config(&conf, CONFIG_PATH);
return Some(conf); return Some(conf);
}
} }
None None
} }
@ -191,22 +182,23 @@ fn once_get_remote_configuration(serv_info: &str) -> Option<Processes> {
if remote.is_none() { if remote.is_none() {
error!("Pulled config is invalid. Check it in Redis Server"); error!("Pulled config is invalid. Check it in Redis Server");
} }
remote return remote;
}, },
Err(_) => { Err(_) => {
error!("Cannot extract payload from new message. Check Redis Server state"); error!("Cannot extract payload from new message. Check Redis Server state");
None return None;
}, },
} }
}, },
Err(_) => { Err(_) => {
None warn!("Cannot get config from Redis Server. Empty channel");
return None;
}, },
} }
}, },
Err(_) => { Err(_) => {
error!("Redis subscription process failed. Check Redis configuration!"); error!("Redis subscription process failed. Check Redis configuration!");
None return None;
} }
} }
} }
@ -319,13 +311,8 @@ fn restart_main_thread() -> std::io::Result<()> {
/// ///
/// *depends on* : `Processes` /// *depends on* : `Processes`
/// ///
pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<PrebootParams>) -> Result<(), CustomError> { pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(), CustomError> {
let config_path = params.config.to_str().unwrap_or_else(|| "settings.json"); if let Ok(client) = Client::open(format!("redis://{}/", &actual_prcs.config_server)) {
if params.no_sub || params.no_remote_config {
return Err(CustomError::Fatal);
}
if let Ok(client) = Client::open(format!("redis://{}/", &params.remote_server_url)) {
if let Ok(mut conn) = client.get_connection() { if let Ok(mut conn) = client.get_connection() {
match crate::utils::get_container_id() { match crate::utils::get_container_id() {
Some(channel_name) => { Some(channel_name) => {
@ -335,6 +322,7 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<Pr
info!("Runner subscribed on config update publishing in channel {}", &channel_name); info!("Runner subscribed on config update publishing in channel {}", &channel_name);
loop { loop {
if let Ok(msg) = pubsub.get_message() { if let Ok(msg) = pubsub.get_message() {
info!("New config was pulled from Redis Server");
let get_remote_config: Result<String, redis::RedisError> = msg.get_payload(); let get_remote_config: Result<String, redis::RedisError> = msg.get_payload();
match get_remote_config { match get_remote_config {
Ok(payload) => { Ok(payload) => {
@ -342,8 +330,8 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<Pr
match config_comparing(&actual_prcs, &remote_config) { match config_comparing(&actual_prcs, &remote_config) {
ConfigActuality::Remote => { ConfigActuality::Remote => {
warn!("Pulled config is actual. Saving and restarting..."); warn!("Pulled config is actual. Saving and restarting...");
if save_new_config(&remote_config, config_path).is_err() { if save_new_config(&remote_config, CONFIG_PATH).is_err() {
error!("Error with saving new config to {}. Stopping sub mechanism...", config_path); error!("Error with saving new config to {}. Stopping sub mechanism...", &CONFIG_PATH);
return Err(CustomError::Fatal); return Err(CustomError::Fatal);
} }
if restart_main_thread().is_err() { if restart_main_thread().is_err() {
@ -351,10 +339,7 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<Pr
return Err(CustomError::Fatal); return Err(CustomError::Fatal);
} }
} }
_ => { _ => continue,
warn!("Pulled new config. Current config is more actual ...");
continue
},
} }
} }
else { else {
@ -367,7 +352,7 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<Pr
}, },
} }
} }
sleep(Duration::from_secs(30)).await; tokio::time::sleep(tokio::time::Duration::from_secs(30)).await;
} }
} else { } else {
error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name); error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name);
@ -448,7 +433,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr
Err(_) => Err(CustomError::Fatal), Err(_) => Err(CustomError::Fatal),
} }
} }
Err(_) => Err(CustomError::Fatal), Err(_) => return Err(CustomError::Fatal),
} }
} }
Err(_) => Err(CustomError::Fatal), Err(_) => Err(CustomError::Fatal),

View File

@ -61,27 +61,8 @@ pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> {
#[cfg(test)] #[cfg(test)]
mod logger_tests { mod logger_tests {
use super::*; use super::*;
// #[test]
// fn setting_up_logger() {
// assert!(setup_logger().is_ok());
// }
#[test] #[test]
fn setting_up_logger() { fn setting_up_logger() {
Builder::new() assert!(setup_logger().is_ok());
.format(move |buf, record| {
writeln!(
buf,
"|{}| {} [{}] - {}",
get_container_id().unwrap_or("NODE".to_string()).trim(),
Local::now().format("%d-%m-%Y %H:%M:%S"),
record.level(),
record.args(),
)
})
.filter(None, LevelFilter::Info)
.target(env_logger::Target::Stdout)
.is_test(true)
.init();
} }
} }

View File

@ -1,4 +1,4 @@
use super::structs::CustomError; use crate::options::structs::CustomError;
use std::sync::Arc; use std::sync::Arc;
use tokio::io; use tokio::io;
use tokio::sync::mpsc; use tokio::sync::mpsc;

View File

@ -1,5 +1,3 @@
#![allow(dead_code)]
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -7,7 +5,6 @@ use serde::{Deserialize, Serialize};
pub enum CustomError { pub enum CustomError {
Fatal, Fatal,
} }
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum ConfigActuality { pub enum ConfigActuality {
Local, Local,
@ -21,7 +18,7 @@ pub enum ConfigActuality {
/// ///
/// *depends on* : `TrackingProcess` /// *depends on* : `TrackingProcess`
/// ///
/// ``` json /// ```
/// { /// {
/// -> "dateOfCreation": "1721381809104", /// -> "dateOfCreation": "1721381809104",
/// -> "configServer": "localhost", /// -> "configServer": "localhost",
@ -47,7 +44,7 @@ pub struct Processes {
/// ///
/// *depends on* : `Dependencies` /// *depends on* : `Dependencies`
/// ///
/// ``` json /// ```
/// ... /// ...
/// "processes": [ /// "processes": [
/// -> { /// -> {
@ -72,7 +69,7 @@ pub struct TrackingProcess {
/// ///
/// *depends on* : `Files`, `Services` /// *depends on* : `Files`, `Services`
/// ///
/// ``` json /// ```
/// ... /// ...
/// "path": "/home/user/monitor/runner-rs/temp-process", /// "path": "/home/user/monitor/runner-rs/temp-process",
/// -> "dependencies": { /// -> "dependencies": {
@ -96,7 +93,7 @@ pub struct Dependencies {
/// ///
/// *depends on* : `FileTriggers` /// *depends on* : `FileTriggers`
/// ///
/// ``` json /// ```
/// ... /// ...
/// "files": [ /// "files": [
/// -> { /// -> {
@ -121,7 +118,7 @@ pub struct Files {
/// ///
/// *depends on* : `ServiceTriggers` /// *depends on* : `ServiceTriggers`
/// ///
/// ``` json /// ```
/// ... /// ...
/// "services": [ /// "services": [
/// -> { /// -> {
@ -146,7 +143,7 @@ pub struct Services {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
/// ``` json /// ```
/// ... /// ...
/// "port": 443, /// "port": 443,
/// -> "triggers": { /// -> "triggers": {
@ -171,7 +168,7 @@ pub struct ServiceTriggers {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
/// ``` json /// ```
/// ... /// ...
/// "src": "/home/user/monitor/runner-rs/tests/examples/", /// "src": "/home/user/monitor/runner-rs/tests/examples/",
/// -> "triggers": { /// -> "triggers": {

265
src/utils.rs Normal file
View File

@ -0,0 +1,265 @@
pub mod files;
pub mod hagent;
pub mod metrics;
pub mod prcs;
pub mod services;
//
use crate::options::structs::TrackingProcess;
use files::create_watcher;
use files::file_handler;
use inotify::Inotify;
use log::{error, warn};
use prcs::{
freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process,
unfreeze_process,
};
use services::service_handler;
use std::process::Command;
use std::sync::Arc;
use tokio::join;
use tokio::sync::mpsc;
use tokio::time::Duration;
const GET_ID_CMD: &str = "hostname";
/// # Fn `run_daemons`
/// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to given messages into channel
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `&mut mpsc::Receiver<u8>`,
///
/// *output* : ()
///
/// *initiator* : main thread
///
/// *managing* : Arc to current process struct, Arc to managing channel writer, mut ref to managing channel reader
///
/// *depends on* : all module `prcs`'s functions, fn `running_handler`, fn `utils::files::create_watcher`
///
/// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process
///
/// Protocol codes handled below: 1/2 file-dependency error -> terminate/freeze;
/// 3 start failure; 4/5/6 service timeout -> stay/terminate/freeze;
/// 7/8/9 file changed -> terminate/restart/stay; 10|11 unfreeze;
/// 101 bad trigger values; 121 watcher creation failure; 111 global shutdown.
pub async fn run_daemons(
    proc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    rx: &mut mpsc::Receiver<u8>,
) {
    // creating watchers + ---buffers---
    // a failed watcher is reported via protocol code 121, consumed below
    let mut watchers: Vec<Inotify> = vec![];
    for file in proc.dependencies.files.clone().into_iter() {
        if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
            watchers.push(watcher);
        } else {
            let _ = tx.send(121).await;
        }
        // watchers.push(create_watcher(&file.filename, &file.src).await.unwrap());
    }
    let watchers_clone: Arc<tokio::sync::Mutex<Vec<Inotify>>> =
        Arc::new(tokio::sync::Mutex::new(watchers));
    loop {
        // race one monitoring pass against the next control message
        let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone());
        tokio::select! {
            _ = run_hand => {},
            _val = rx.recv() => {
                // NOTE(review): `.unwrap()` panics if every sender is dropped;
                // assumes the senders outlive this loop — confirm.
                match _val.unwrap() {
                    // 1 - File-dependency handling error -> terminating (after waiting)
                    1 => {
                        if is_active(&proc.name).await {
                            error!("File-dependency handling error: Terminating {} process ..." , &proc.name);
                            terminate_process(&proc.name).await;
                            tokio::time::sleep(Duration::from_millis(100)).await;
                        }
                        return;
                    },
                    // 2 - File-dependency handling error -> holding (after waiting)
                    2 => {
                        if !is_frozen(&proc.name).await {
                            error!("File-dependency handling error: Freezing {} process ..." , &proc.name);
                            freeze_process(&proc.name).await;
                            tokio::time::sleep(Duration::from_millis(100)).await;
                        }
                    },
                    // 3 - Running process error
                    3 => {
                        error!("Error due to starting {} process", &proc.name);
                        break;
                    },
                    // 4 - Timeout of waiting service-dependency -> staying (after waiting)
                    4 => {
                        // warn!("Timeout of waiting service-dependency: Ignoring on {} process ..." , &proc.name);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    },
                    // 5 - Timeout of waiting service-dependency -> terminating (after waiting)
                    5 => {
                        if is_active(&proc.name).await {
                            error!("Timeout of waiting service-dependency: Terminating {} process ..." , &proc.name);
                            terminate_process(&proc.name).await;
                            tokio::time::sleep(Duration::from_millis(1000)).await;
                        }
                    },
                    // 6 - Timeout of waiting service-dependency -> holding (after waiting)
                    6 => {
                        // println!("holding {}-{}", proc.name, is_active(&proc.name).await);
                        if !is_frozen(&proc.name).await {
                            error!("Timeout of waiting service-dependency: Freezing {} process ..." , &proc.name);
                            freeze_process(&proc.name).await;
                            tokio::time::sleep(Duration::from_secs(1)).await;
                        }
                    },
                    // // 7 - File-dependency change -> terminating (after check)
                    7 => {
                        error!("File-dependency warning (file changed). Terminating {} process...", &proc.name);
                        terminate_process(&proc.name).await;
                        tokio::time::sleep(Duration::from_millis(100)).await;
                        return;
                    },
                    // // 8 - File-dependency change -> restarting (after check)
                    8 => {
                        warn!("File-dependency warning (file changed). Restarting {} process...", &proc.name);
                        let _ = restart_process(&proc.name, &proc.path).await;
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    },
                    // // 9 - File-dependency change -> staying (after check)
                    9 => {
                        // no need to trash logs
                        warn!("File-dependency warning (file changed). Ignoring event on {} process...", &proc.name);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    },
                    // 10 - Process unfreaze call via file handler (or service handler)
                    10 | 11 => {
                        if is_frozen(&proc.name).await {
                            warn!("Unfreezing process {} call...", &proc.name);
                            unfreeze_process(&proc.name).await;
                        }
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    },
                    // 11 - Process unfreaze call via service handler
                    // 11 => {
                    //     if is_frozen(&proc.name).await {
                    //         warn!("Unfreezing process {} call...", &proc.name);
                    //         unfreeze_process(&proc.name).await;
                    //     }
                    //     tokio::time::sleep(Duration::from_millis(100)).await;
                    // },
                    // 101 - Impermissible trigger values in JSON
                    101 => {
                        error!("Impermissible trigger values in JSON in {}'s block. Killing thread...", proc.name);
                        if is_active(&proc.name).await {
                            terminate_process(&proc.name).await;
                        }
                        break;
                    },
                    //
                    // 121 - Cannot create valid watcher for file dependency
                    121 => {
                        error!("Cannot create valid watcher for {}'s file dependency. Terminating thread...", proc.name);
                        let _ = terminate_process("runner-rs").await;
                        break;
                    },
                    // 111 - global thread termination with killing current child in a face
                    // of a current process
                    111 => {
                        warn!("Terminating {}'s child processes...", &proc.name);
                        match is_active(&proc.name).await {
                            true => {
                                terminate_process(&proc.name).await;
                            },
                            false => {
                                log::info!("Process {} is already terminated!", proc.name);
                            },
                        }
                        break;
                    },
                    // unknown codes are deliberately ignored
                    _ => {},
                }
            },
        }
        tokio::task::yield_now().await;
    }
    tokio::task::yield_now().await;
}
// check process status daemon
/// # Fn `running_handler`
/// ## func to async exec subjobs of checking process, services and files states
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `Arc<tokio::sync::Mutex<Vec<Inotify>>>`
///
/// *output* : ()
///
/// *initiator* : fn `run_daemons`
///
/// *managing* : Arc to current process struct, Arc to Mutex to list of file watchers
///
/// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}`
///
pub async fn running_handler(
    prc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>>,
) {
    // services and files check (once) — both dependency passes run concurrently
    let files_check = file_handler(
        &prc.name,
        &prc.dependencies.files,
        tx.clone(),
        watchers.clone(),
    );
    let services_check = service_handler(&prc.name, &prc.dependencies.services, tx.clone());
    let res = join!(files_check, services_check);
    // if inactive -> spawn checks -> active is true
    // (a start failure is reported to the managing loop as protocol code 3)
    if !is_active(&prc.name).await && res.0.is_ok() && res.1.is_ok() {
        if start_process(&prc.name, &prc.path).await.is_err() {
            tx.send(3).await.unwrap();
            return;
        }
    }
    // if frozen -> spawn checks -> unfreeze is true (protocol code 10)
    else if is_frozen(&prc.name).await && res.0.is_ok() && res.1.is_ok() {
        tx.send(10).await.unwrap();
        return;
    }
    // tokio::time::sleep(Duration::from_millis(100)).await;
    tokio::task::yield_now().await;
}
// todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}'
/// # Fn `get_container_id`
/// ## for getting container id used in logs
///
/// *input* : -
///
/// *output* : Some(String) if cont-id was grabbed | None - if not
///
/// *initiator* : fn `options::logger::setup_logger`
///
/// *managing* : -
///
/// *depends on* : -
///
pub fn get_container_id() -> Option<String> {
    match Command::new(GET_ID_CMD).output() {
        Ok(output) => {
            // a non-zero exit means no usable id
            if !output.status.success() {
                return None;
            }
            // decode stdout exactly once; the previous code re-converted the
            // same bytes a second time for the return value
            let id = String::from_utf8_lossy(&output.stdout).to_string();
            // NOTE(review): `hostname` output usually carries a trailing
            // newline; callers are expected to trim it themselves.
            if id.is_empty() {
                None
            } else {
                Some(id)
            }
        }
        Err(_) => None,
    }
}
#[cfg(test)]
mod utils_unittests {
    use super::get_container_id;
    /// Smoke test: an id must be obtainable for use in log prefixes.
    #[test]
    fn check_if_container_id_can_be_grabed() {
        assert!(matches!(get_container_id(), Some(_)));
    }
}

View File

@ -1,5 +1,5 @@
use crate::options::structs::{CustomError, Files}; use crate::options::structs::{CustomError, Files};
use super::prcs::{is_active, is_frozen}; use crate::utils::prcs::{is_active, is_frozen};
use inotify::{EventMask, Inotify, WatchMask}; use inotify::{EventMask, Inotify, WatchMask};
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::path::Path; use std::path::Path;
@ -98,10 +98,7 @@ pub async fn file_handler(
// * watcher recreation after dealing with file recreation mechanism in text editors // * watcher recreation after dealing with file recreation mechanism in text editors
let mutex = notify.borrow_mut(); let mutex = notify.borrow_mut();
// *mutex = create_watcher(&file.filename, &file.src).await.unwrap(); *mutex = create_watcher(&file.filename, &file.src).await.unwrap();
if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
*mutex = watcher;
}
} }
match file.triggers.on_change.as_str() { match file.triggers.on_change.as_str() {
"stop" => { "stop" => {
@ -162,22 +159,22 @@ mod files_unittests {
use super::*; use super::*;
#[tokio::test] #[tokio::test]
async fn try_to_create_watcher() { async fn try_to_create_watcher() {
let res = create_watcher("dep-file", "./tests/examples/").await; let res = create_watcher("dep-file", "/home/user/monitor/runner-rs/tests/examples/").await;
assert!(res.is_ok()); assert!(res.is_ok());
} }
#[tokio::test] #[tokio::test]
async fn try_to_create_invalid_watcher() { async fn try_to_create_invalid_watcher() {
let res = create_watcher("invalid-file", "/path/to/the/no/dir").await; let res = create_watcher("invalid-file", "/path/to/the/hell").await;
assert!(res.is_err()); assert!(res.is_err());
} }
#[tokio::test] #[tokio::test]
async fn check_existing_file() { async fn check_existing_file() {
let res = check_file("dep-file", "./tests/examples/").await; let res = check_file("dep-file", "/home/user/monitor/runner-rs/tests/examples/").await;
assert!(res.is_ok()); assert!(res.is_ok());
} }
#[tokio::test] #[tokio::test]
async fn check_non_existing_file() { async fn check_non_existing_file() {
let res = check_file("invalid-file", "/path/to/the/no/dir").await; let res = check_file("invalid-file", "/path/to/the/hell").await;
assert!(res.is_err()); assert!(res.is_err());
} }
} }

View File

@ -1,11 +1,5 @@
//
// module needed to check host-agent health condition and to communicate with it // module needed to check host-agent health condition and to communicate with it
//
use tokio::{io::Interest, net::UnixStream}; use tokio::{io::Interest, net::UnixStream};
use anyhow::{Ok, Result, Error};
// to kill lint bug
#[allow(unused_imports)]
use tokio::net::UnixListener;
/// # Fn `open_unix_socket` /// # Fn `open_unix_socket`
/// ## opening unix-socket for host-agent communication /// ## opening unix-socket for host-agent communication
@ -20,10 +14,9 @@ use tokio::net::UnixListener;
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)] async fn open_unix_socket() -> Result<UnixStream, std::io::Error> {
async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error> { let socket = UnixStream::connect("/var/run/enode/hostagent.sock").await?;
// "/var/run/enode/hostagent.sock" Ok(socket)
UnixStream::connect(sock_path).await
} }
/// # Fn `ha_healthcheck` /// # Fn `ha_healthcheck`
@ -39,11 +32,15 @@ async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error>
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)] async fn ha_healthcheck(socket: &UnixStream) -> Result<(), std::io::Error >{
async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
socket.ready(Interest::WRITABLE).await?; socket.ready(Interest::WRITABLE).await?;
socket.writable().await?; if socket.writable().await.is_ok() {
socket.try_write(b"Hello HAgent")?; if let Err(er) = socket.try_write(b"Hello HAgent") {
return Err(er);
}
} else {
return Err(std::io::ErrorKind::WouldBlock.into());
}
Ok(()) Ok(())
} }
@ -60,37 +57,34 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)] async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), std::io::Error > {
async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error > {
socket.ready(Interest::WRITABLE).await?; socket.ready(Interest::WRITABLE).await?;
socket.writable().await?; if socket.writable().await.is_ok() {
socket.try_write(data.as_bytes())?; if let Err(er) = socket.try_write(data.as_bytes()) {
return Err(er);
}
} else {
return Err(std::io::ErrorKind::WouldBlock.into());
}
Ok(()) Ok(())
} }
#[cfg(test)] #[cfg(test)]
mod hagent_unittets { mod hagent_unittets {
use super::*; use super::*;
const TEST_SOCKET: &str = "./tests/examples/hagent_test.sock"; #[tokio::test]
// maybe bool : true -> alive, false -> dead
async fn init_listener() -> UnixListener { // simple request on api
let _ = std::fs::remove_file(TEST_SOCKET); async fn hagent_healthcheck() {
UnixListener::bind(TEST_SOCKET).unwrap() let sock = open_unix_socket().await;
assert!(sock.is_ok());
let sock = sock.unwrap();
assert!(ha_healthcheck(&sock).await.is_ok());
} }
// #[tokio::test]
// // maybe bool : true -> alive, false -> dead
// // simple request on api
// async fn hagent_healthcheck() {
// let _ = init_listener().await;
// let sock = open_unix_socket(TEST_SOCKET).await;
// assert!(sock.is_ok());
// let sock = sock.unwrap();
// assert!(ha_healthcheck(&sock).await.is_ok());
// }
#[tokio::test] #[tokio::test]
// --Result<maybe Response> // --Result<maybe Response>
// one-shot func // one-shot func
async fn hagent_communication_test() { async fn send_metrics_to_hagent() {
use crate::options::structs::{ProcessMetrics, ContainerMetrics, Metrics}; use crate::options::structs::{ProcessMetrics, ContainerMetrics, Metrics};
let procm = ProcessMetrics::new("test-prc", 15.0, 5.0); let procm = ProcessMetrics::new("test-prc", 15.0, 5.0);
@ -98,9 +92,7 @@ mod hagent_unittets {
let metrics = Metrics::new(contm, vec![procm]); let metrics = Metrics::new(contm, vec![procm]);
let metrics = &serde_json::to_string_pretty(&metrics).unwrap(); let metrics = &serde_json::to_string_pretty(&metrics).unwrap();
#[allow(unused_mut)] let sock = open_unix_socket().await;
let mut _list = init_listener().await;
let sock = open_unix_socket(TEST_SOCKET).await;
assert!(sock.is_ok()); assert!(sock.is_ok());
let sock = sock.unwrap(); let sock = sock.unwrap();
assert!(ha_healthcheck(&sock).await.is_ok()); assert!(ha_healthcheck(&sock).await.is_ok());
@ -109,6 +101,6 @@ mod hagent_unittets {
} }
#[tokio::test] #[tokio::test]
async fn open_unixsocket_test() { async fn open_unixsocket_test() {
assert!(open_unix_socket("non/valid/socket/file.sock").await.is_err()); assert!(open_unix_socket().await.is_ok());
} }
} }

View File

@ -7,7 +7,7 @@ use crate::options::structs::TrackingProcess;
use sysinfo::{Process, System}; use sysinfo::{Process, System};
use tokio::join; use tokio::join;
use crate::options::structs::{ProcessMetrics, ContainerMetrics}; use crate::options::structs::{ProcessMetrics, ContainerMetrics};
use super::get_container_id; use crate::utils::get_container_id;
// use pcap::{Device, Capture, Active}; // use pcap::{Device, Capture, Active};
// use std::net::Ipv4Addr; // use std::net::Ipv4Addr;
// use anyhow::{Result, Ok}; // use anyhow::{Result, Ok};
@ -27,7 +27,6 @@ use super::get_container_id;
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)]
pub async fn init_metrics_grubber() { pub async fn init_metrics_grubber() {
let mut system = System::new(); let mut system = System::new();
// let mut buffer: Vec<PacketInfo> = vec![]; // let mut buffer: Vec<PacketInfo> = vec![];
@ -40,8 +39,6 @@ pub async fn init_metrics_grubber() {
// let _ = capture_packets(shared_buf.clone()).await; // let _ = capture_packets(shared_buf.clone()).await;
} }
#[allow(dead_code)]
#[allow(unused_variables)]
async fn gather_metrics(proc: Arc<Process>) { async fn gather_metrics(proc: Arc<Process>) {
} }
@ -95,7 +92,6 @@ async fn gather_metrics(proc: Arc<Process>) {
/// ///
/// *depends on* : `TrackingProcess` /// *depends on* : `TrackingProcess`
/// ///
#[allow(dead_code)]
async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProcess>>) -> ContainerMetrics { async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProcess>>) -> ContainerMetrics {
let metrics = join!( let metrics = join!(
get_cpu_metrics_container(sys.clone()), get_cpu_metrics_container(sys.clone()),
@ -123,7 +119,6 @@ async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProce
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)]
async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 { async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 {
sys.global_cpu_usage() sys.global_cpu_usage()
} }
@ -141,7 +136,6 @@ async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)]
async fn get_ram_metrics_container(sys: Arc<System>) -> f32 { async fn get_ram_metrics_container(sys: Arc<System>) -> f32 {
(sys.used_memory() / sys.total_memory()) as f32 * 100.0 (sys.used_memory() / sys.total_memory()) as f32 * 100.0
} }
@ -162,7 +156,6 @@ async fn get_ram_metrics_container(sys: Arc<System>) -> f32 {
/// ///
/// *depends on* : `TrackingProcess` /// *depends on* : `TrackingProcess`
/// ///
#[allow(dead_code)]
async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> { async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> {
prcs.iter().map(|process| process.name.clone()).collect() prcs.iter().map(|process| process.name.clone()).collect()
} }
@ -180,7 +173,6 @@ async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)]
async fn get_all_metrics_process(proc: Arc<Process>, sys: Arc<System>) -> ProcessMetrics { async fn get_all_metrics_process(proc: Arc<Process>, sys: Arc<System>) -> ProcessMetrics {
let metrics = join!( let metrics = join!(
get_cpu_metrics_process(proc.clone()), get_cpu_metrics_process(proc.clone()),

View File

@ -233,15 +233,14 @@ mod process_unittests {
// rewrite, its a pipe // rewrite, its a pipe
#[tokio::test] #[tokio::test]
async fn full_cycle_with_restart() { async fn full_cycle_with_restart() {
// let _ = std::io::stdout().write_all(b""); let res1 = start_process("temp-process", "/home/user/monitor/runner-rs/temp-process").await;
let res1 = start_process("restart-prc", "./tests/examples/restart-prc").await;
assert!(res1.is_ok()); assert!(res1.is_ok());
let res2 = let res2 =
restart_process("restart-prc", "./tests/examples/restart-prc").await; restart_process("temp-process", "/home/user/monitor/runner-rs/temp-process").await;
assert!(res2.is_ok()); assert!(res2.is_ok());
let _ = terminate_process("restart-prc").await; let _ = terminate_process("temp-process").await;
let res3 = is_active("restart-prc").await; let res3 = is_active("temp-process").await;
assert!(!res3); assert!(res3);
} }
// rewrite, its a pipe // rewrite, its a pipe
#[tokio::test] #[tokio::test]
@ -250,10 +249,7 @@ mod process_unittests {
} }
#[tokio::test] #[tokio::test]
async fn is_active_check() { async fn is_active_check() {
let res1 = start_process("tmp-prc", "./tests/examples/tmp-prc").await; assert!(is_active("systemd").await);
assert!(res1.is_ok());
assert!(is_active("tmp-prc").await);
let _ = terminate_process("tmp-prc").await;
} }
#[tokio::test] #[tokio::test]
async fn isnt_active_check() { async fn isnt_active_check() {
@ -261,17 +257,11 @@ mod process_unittests {
} }
#[tokio::test] #[tokio::test]
async fn is_frozen_check() { async fn is_frozen_check() {
let res1 = start_process("freeze-check", "./tests/examples/freeze-check").await; assert!(!is_frozen("systemd").await);
assert!(res1.is_ok());
assert!(!is_frozen("freeze-check").await);
} }
#[tokio::test] #[tokio::test]
async fn pidof_active_process() { async fn pidof_active_process() {
assert!(get_pid("pidof-prc").await.is_none()); assert!(get_pid("systemd").await.is_some());
let res1 = start_process("pidof-prc", "./tests/examples/pidof-prc").await;
assert!(res1.is_ok());
assert!(get_pid("pidof-prc").await.is_some());
let _ = terminate_process("pidof-prc").await;
} }
// broken mechanism need to check // broken mechanism need to check

View File

@ -1,5 +1,5 @@
use crate::options::structs::{CustomError, Services}; use crate::options::structs::{CustomError, Services};
use super::prcs::{is_active, is_frozen}; use crate::utils::prcs::{is_active, is_frozen};
use log::{error, warn}; use log::{error, warn};
use std::net::{TcpStream, ToSocketAddrs}; use std::net::{TcpStream, ToSocketAddrs};
use std::sync::Arc; use std::sync::Arc;

BIN
temp-process Executable file

Binary file not shown.