Compare commits
99 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
f6e440bc62 | |
|
|
b425b17d25 | |
|
|
3d179713a8 | |
|
|
abafa99e0e | |
|
|
35dbce562e | |
|
|
fb3db62728 | |
|
|
c23c437abb | |
|
|
8174affa51 | |
|
|
504bc7df84 | |
|
|
57195a71ae | |
|
|
a9351a8cb2 | |
|
|
85b9c3175c | |
|
|
780a3c1a37 | |
|
|
e5e75a417f | |
|
|
512df7ebee | |
|
|
8cd4a6dad7 | |
|
|
f08b54d51e | |
|
|
d6de6948f8 | |
|
|
5cfae2c246 | |
|
|
0b61f56bca | |
|
|
ebff93698c | |
|
|
d400318aad | |
|
|
b67feb8ce5 | |
|
|
0112066418 | |
|
|
eed9fa881a | |
|
|
5d54c5b97c | |
|
|
cc8c7f19bf | |
|
|
390f496004 | |
|
|
f6c952f208 | |
|
|
497dcbaeb6 | |
|
|
0e959b9a58 | |
|
|
d229ae1ce9 | |
|
|
576a5e0739 | |
|
|
a25a630d77 | |
|
|
e7e7eb99d8 | |
|
|
5486b6d584 | |
|
|
e59b3f9d06 | |
|
|
aaa3459920 | |
|
|
204d284871 | |
|
|
7e0d22d4e0 | |
|
|
3d12137052 | |
|
|
4d2fe57680 | |
|
|
014e8dd56d | |
|
|
8b123cd593 | |
|
|
ce067efffd | |
|
|
426e287866 | |
|
|
c03981186a | |
|
|
939bdc6676 | |
|
|
97bc91ffcd | |
|
|
7be46d4373 | |
|
|
1578216712 | |
|
|
5a1588e256 | |
|
|
4fb3533074 | |
|
|
7fa9d02343 | |
|
|
56a20eb65c | |
|
|
2dbfb4a93a | |
|
|
77a1e24a47 | |
|
|
b51a3fb0f0 | |
|
|
a75160c3e2 | |
|
|
6e86dcbf09 | |
|
|
da3d8cd129 | |
|
|
e7817a97b6 | |
|
|
2d225f4c09 | |
|
|
56769f54b9 | |
|
|
6469130662 | |
|
|
b6ecb10a77 | |
|
|
1c6729daab | |
|
|
5bba6e8b82 | |
|
|
a20fcf58f2 | |
|
|
537e853706 | |
|
|
fb0568bee6 | |
|
|
93917800f0 | |
|
|
9d14658fd0 | |
|
|
1b12ecc67f | |
|
|
5a9bf795e9 | |
|
|
d67e77c5cc | |
|
|
f3e9cb92df | |
|
|
28f0eb53f6 | |
|
|
af12a1fef1 | |
|
|
dbb49de09c | |
|
|
74e3b3ab31 | |
|
|
8a47ffe851 | |
|
|
345abde741 | |
|
|
ba395543e4 | |
|
|
591a1df5d6 | |
|
|
bda4762713 | |
|
|
56caa68135 | |
|
|
d3764448aa | |
|
|
3a51ea1418 | |
|
|
363d38bdb6 | |
|
|
9b9b8d0b13 | |
|
|
627a132e53 | |
|
|
cc18b1cfe6 | |
|
|
1884469c9e | |
|
|
406b199b09 | |
|
|
150bb87a41 | |
|
|
f4217cf8ad | |
|
|
b2ce9acf50 | |
|
|
36a81c9bf2 |
|
|
@ -1,5 +1,6 @@
|
|||
/target
|
||||
.idea
|
||||
Dockerfile
|
||||
/.env
|
||||
Cargo.lock
|
||||
settings.json
|
||||
hagent_test.sock
|
||||
release
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
24
Cargo.toml
24
Cargo.toml
|
|
@ -1,20 +1,12 @@
|
|||
[package]
|
||||
name = "runner-rs"
|
||||
version = "0.9.25"
|
||||
edition = "2021"
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"noxis-rs",
|
||||
"noxis-cli",
|
||||
]
|
||||
|
||||
[profile.dev]
|
||||
debug = true
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.93"
|
||||
chrono = "0.4.38"
|
||||
env_logger = "0.11.3"
|
||||
inotify = "0.10.2"
|
||||
log = "0.4.22"
|
||||
pcap = "2.2.0"
|
||||
redis = "0.25.4"
|
||||
serde = { version = "1.0.203", features = ["derive"] }
|
||||
serde_json = "1.0.118"
|
||||
sysinfo = "0.32.0"
|
||||
tokio = { version = "1.38.0", features = ["full", "time"] }
|
||||
[profile.test]
|
||||
debug = false
|
||||
|
|
|
|||
40
Dockerfile
40
Dockerfile
|
|
@ -1,25 +1,31 @@
|
|||
FROM ubuntu
|
||||
FROM ubuntu:22.04
|
||||
|
||||
RUN mkdir -p /usr/src/kii/
|
||||
USER root
|
||||
|
||||
RUN apt update && apt install -y \
|
||||
curl \
|
||||
build-essential \
|
||||
libssl-dev \
|
||||
pkg-config \
|
||||
libudev-dev \
|
||||
procps \
|
||||
gcc-riscv64-unknown-elf \
|
||||
gcc-riscv64-linux-gnu \
|
||||
binutils-riscv64-linux-gnu \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
WORKDIR /usr/src/kii/
|
||||
|
||||
RUN mkdir monitor/
|
||||
RUN mkdir -p services/temp-process/
|
||||
RUN touch services/temp-process/dep.txt
|
||||
RUN touch services/temp-process/run.sh
|
||||
RUN echo "./services/temp-process/temp-process &>/dev/null" >> services/temp-process/run.sh
|
||||
COPY . ./
|
||||
|
||||
COPY target/x86_64-unknown-linux-gnu/release/runner-rs monitor/
|
||||
COPY settings.json .
|
||||
COPY temp-process services/temp-process/
|
||||
RUN chmod +x noxis-rs/temp-process
|
||||
|
||||
RUN chmod +x services/temp-process/temp-process
|
||||
RUN chmod +x services/temp-process/run.sh
|
||||
RUN chmod +x monitor/runner-rs
|
||||
RUN rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu
|
||||
|
||||
# some troubles with execution this row-cmd
|
||||
# ?: cannot get while initializing container
|
||||
RUN export ENODE_CID=$(cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' "{print \$6}")
|
||||
RUN cargo unibuild
|
||||
|
||||
ENTRYPOINT [ "/usr/src/kii/monitor/runner-rs" ]
|
||||
ENTRYPOINT ["cargo", "test"]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,243 @@
|
|||
pipeline {
|
||||
agent any
|
||||
stages {
|
||||
stage('Tests and compiling binaries') {
|
||||
when {
|
||||
expression { env.CHANGE_BRANCH?.startsWith('feature/') || env.CHANGE_BRANCH?.startsWith('rc') }
|
||||
}
|
||||
steps {
|
||||
script {
|
||||
echo "Building and running tests in Docker for feature branch..."
|
||||
try {
|
||||
def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
|
||||
def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
|
||||
|
||||
sh "mkdir -p ${targetDirAmd}"
|
||||
sh "mkdir -p ${targetDirRisc}"
|
||||
|
||||
sh """
|
||||
docker build --network=host -t e-monitor .
|
||||
docker run --name e-monitor --dns 8.8.8.8 --network=host e-monitor:latest
|
||||
"""
|
||||
|
||||
sh "cp noxis-rs/settings.json ${targetDirAmd}"
|
||||
sh "cp noxis-rs/settings.json ${targetDirRisc}"
|
||||
|
||||
sh "docker cp e-monitor:/usr/src/kii/target/x86_64-unknown-linux-gnu/release/noxis-cli ${targetDirAmd}"
|
||||
sh "docker cp e-monitor:/usr/src/kii/target/x86_64-unknown-linux-gnu/release/noxis-rs ${targetDirAmd}"
|
||||
|
||||
sh "docker cp e-monitor:/usr/src/kii/target/riscv64gc-unknown-linux-gnu/release/noxis-cli ${targetDirRisc}"
|
||||
sh "docker cp e-monitor:/usr/src/kii/target/riscv64gc-unknown-linux-gnu/release/noxis-rs ${targetDirRisc}"
|
||||
|
||||
echo "Tests passed successfully and binaries were extracted!"
|
||||
} catch (Exception e) {
|
||||
echo "Tests failed during Docker run."
|
||||
error "Build failed at 'CI for feature' stage."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Calculate Install Size') {
|
||||
when {
|
||||
expression { env.CHANGE_BRANCH?.startsWith('rc') }
|
||||
}
|
||||
steps {
|
||||
script {
|
||||
echo "Calculating installation size for rc branch..."
|
||||
def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
|
||||
def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
|
||||
|
||||
def installSizeAmd = sh(script: "du -s --block-size=1024 ${targetDirAmd} | awk '{print \$1}'", returnStdout: true).trim()
|
||||
def installSizeRisc = sh(script: "du -s --block-size=1024 ${targetDirRisc} | awk '{print \$1}'", returnStdout: true).trim()
|
||||
|
||||
env.INSTALL_SIZE_AMD = installSizeAmd
|
||||
env.INSTALL_SIZE_RISC = installSizeRisc
|
||||
|
||||
echo "Installation size for amd64: ${env.INSTALL_SIZE_AMD} kB"
|
||||
echo "Installation size for riscv64: ${env.INSTALL_SIZE_RISC} kB"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Create Deb Packages') {
|
||||
when {
|
||||
expression { env.CHANGE_BRANCH?.startsWith('rc') }
|
||||
}
|
||||
steps {
|
||||
script {
|
||||
echo "Creating deb packages for rc branch..."
|
||||
|
||||
def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
|
||||
def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
|
||||
def packageName = "noxis"
|
||||
def version = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
|
||||
def createDebPackage = { arch, binDir, targetDir, installSize ->
|
||||
echo "Creating deb package for ${arch}..."
|
||||
|
||||
sh """
|
||||
mkdir -p ${targetDir}/package/DEBIAN
|
||||
mkdir -p ${targetDir}/package/usr/local/enode/${packageName}
|
||||
mkdir -p ${targetDir}/package/usr/bin
|
||||
mkdir -p ${targetDir}/package/etc/enode
|
||||
mkdir -p ${targetDir}/package/lib/systemd/system
|
||||
|
||||
cp ${binDir}/noxis-cli ${targetDir}/package/usr/local/enode/${packageName}/
|
||||
cp ${binDir}/noxis-rs ${targetDir}/package/usr/local/enode/${packageName}/
|
||||
cp ${binDir}/settings.json ${targetDir}/package/etc/enode/
|
||||
|
||||
cat > ${targetDir}/package/DEBIAN/control <<EOF
|
||||
Package: ${packageName}
|
||||
Version: ${version}
|
||||
Section: unknown
|
||||
Priority: optional
|
||||
Architecture: ${arch}
|
||||
Maintainer: kis <supervisor@rosatom.ru>
|
||||
Description: Noxis Agent Linux
|
||||
Installed-Size: ${installSize}
|
||||
EOF
|
||||
|
||||
chmod +x ${targetDir}/package/usr/local/enode/${packageName}/noxis-cli
|
||||
chmod +x ${targetDir}/package/usr/local/enode/${packageName}/noxis-rs
|
||||
|
||||
cat > ${targetDir}/package/DEBIAN/postinst <<EOF
|
||||
#!/bin/bash
|
||||
ln -sf "/usr/local/enode/${packageName}/noxis-cli" "/usr/bin/noxis-cli"
|
||||
ln -sf "/usr/local/enode/${packageName}/noxis-rs" "/usr/bin/noxis-rs"
|
||||
systemctl daemon-reload
|
||||
systemctl start ${packageName}.service
|
||||
EOF
|
||||
chmod +x ${targetDir}/package/DEBIAN/postinst
|
||||
|
||||
cat > ${targetDir}/package/lib/systemd/system/${packageName}.service <<EOF
|
||||
[Unit]
|
||||
Description=Noxis Service
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/local/enode/${packageName}/noxis-rs
|
||||
Restart=always
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
|
||||
dpkg-deb --build ${targetDir}/package ${targetDir}/rc/${arch}/${packageName}_${version}_${arch}.deb
|
||||
echo "${packageName}_${version}_${arch}.deb created successfully!"
|
||||
"""
|
||||
}
|
||||
|
||||
createDebPackage("amd64", targetDirAmd, env.WORKSPACE, env.INSTALL_SIZE_AMD)
|
||||
createDebPackage("riscv64", targetDirRisc, env.WORKSPACE, env.INSTALL_SIZE_RISC)
|
||||
|
||||
env.DEB_PATH_AMD64 = "${env.WORKSPACE}/rc/amd64/${packageName}_${version}_amd64.deb"
|
||||
env.DEB_PATH_RISCV64 = "${env.WORKSPACE}/rc/riscv64/${packageName}_${version}_riscv64.deb"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Transfer Binaries') {
|
||||
when {
|
||||
expression { env.CHANGE_BRANCH?.startsWith('feature/') }
|
||||
}
|
||||
steps {
|
||||
script {
|
||||
echo "Transferring binaries packages to remote machine..."
|
||||
|
||||
withCredentials([usernamePassword(credentialsId: 'ift', passwordVariable: 'SSH_PASS', usernameVariable: 'SSH_USER')]) {
|
||||
def targetDir = "${env.WORKSPACE}/${env.CHANGE_BRANCH}"
|
||||
def remote = [:]
|
||||
remote.name = "remote-server"
|
||||
remote.host = "192.168.2.33"
|
||||
remote.user = SSH_USER
|
||||
remote.password = SSH_PASS
|
||||
remote.allowAnyHosts = true
|
||||
|
||||
sshPut remote: remote, from: "${targetDir}", into: "/home/user/deployments/"
|
||||
echo "Binaries successfully transferred to remote machine."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage('Upload Debs to Repository') {
|
||||
when {
|
||||
expression { env.CHANGE_BRANCH?.startsWith('rc') }
|
||||
}
|
||||
steps {
|
||||
script {
|
||||
echo "Uploading deb packages to remote repository..."
|
||||
|
||||
withCredentials([usernamePassword(credentialsId: 'prod', passwordVariable: 'SSH_PASS', usernameVariable: 'SSH_USER')]) {
|
||||
def remote = [:]
|
||||
remote.name = "remote-server"
|
||||
remote.host = "192.168.2.99"
|
||||
remote.user = SSH_USER
|
||||
remote.password = SSH_PASS
|
||||
remote.allowAnyHosts = true
|
||||
|
||||
echo "Uploading deb packages using sshPut..."
|
||||
sshPut remote: remote, from: "${env.DEB_PATH_AMD64}", into: "/home/user/repo/debs/"
|
||||
sshPut remote: remote, from: "${env.DEB_PATH_RISCV64}", into: "/home/user/repo/debs/"
|
||||
|
||||
echo "Running repository update commands via sshCommand..."
|
||||
sshCommand remote: remote, command: '''
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
cd /home/user/repo/debs/
|
||||
for deb in *.deb; do
|
||||
reprepro -b /var/www/deb/debian/ includedeb stable $deb
|
||||
done
|
||||
rm -f *.deb
|
||||
'''
|
||||
|
||||
echo "Deb packages successfully uploaded and added to the repository!"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
post {
|
||||
always {
|
||||
script {
|
||||
echo "Cleaning up workspace..."
|
||||
try {
|
||||
if (fileExists("${env.WORKSPACE}/package/")) {
|
||||
sh "rm -rf ${env.WORKSPACE}/package/"
|
||||
}
|
||||
if (fileExists("${env.WORKSPACE}/rc/")) {
|
||||
sh "rm -rf ${env.WORKSPACE}/rc/"
|
||||
}
|
||||
sh "docker stop e-monitor && docker rm e-monitor"
|
||||
} catch (Exception e) {
|
||||
echo "Failed to clean up workspace: ${e}"
|
||||
}
|
||||
}
|
||||
}
|
||||
success {
|
||||
script {
|
||||
when {
|
||||
expression { env.CHANGE_BRANCH?.startsWith('rc') }
|
||||
}
|
||||
echo "Attempting to merge PR ${env.CHANGE_ID} into master..."
|
||||
withCredentials([usernamePassword(credentialsId: 'gitea_creds', usernameVariable: 'GITEA_USER', passwordVariable: 'GITEA_PASS')]) {
|
||||
def prId = env.CHANGE_ID
|
||||
sh """
|
||||
curl -X POST \
|
||||
-u "${GITEA_USER}:${GITEA_PASS}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"Do":"merge"}' \
|
||||
http://git.entcor/api/v1/repos/VladislavD/runner-rs/pulls/${prId}/merge
|
||||
"""
|
||||
echo "PR ${prId} merged successfully into master!"
|
||||
}
|
||||
}
|
||||
}
|
||||
failure {
|
||||
echo "Pipeline failed. Check the logs for details."
|
||||
}
|
||||
aborted {
|
||||
echo "Pipeline was aborted."
|
||||
}
|
||||
}
|
||||
}
|
||||
13
README.md
13
README.md
|
|
@ -1,13 +1,16 @@
|
|||
|
||||
# runner-rs ( with amd64 and riscv64 support )
|
||||

|
||||
in-container integrating util to handle processes runtime
|
||||
|
||||
# noxis-rs
|
||||

|
||||
### In-container integrating util to handle processes runtime
|
||||
( with amd64 and riscv64 support )
|
||||
|
||||
## Depends on
|
||||
- `rustup (>=1.27.1)`
|
||||
- `gcc-riscv64-unknown-elf`
|
||||
- `build-essential`
|
||||
- `gcc-riscv64-linux-gnu`
|
||||
- `binutils-riscv64-linux-gnu`
|
||||
|
||||
|
||||
## Setting up
|
||||
Download and execute rustup.sh
|
||||
|
|
@ -29,7 +32,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
|||
cd runner-rs/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu
|
||||
~~~
|
||||
> [!NOTE]
|
||||
> Cargo is configured to build an app for amd64/linux defaultly. RISC-based compilation is optional.
|
||||
> Cargo is configured to build an app for amd64/linux defaultly. RISCV-based compilation is optional.
|
||||
|
||||
3.1. Release build of app for amd64/linux
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,75 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Скрипт для сборки и копирования бинарников
|
||||
# Использование: ./build.sh <архитектура>
|
||||
# Поддерживаемые архитектуры: amd64, riscv64
|
||||
|
||||
if [ -z "$1" ]; then
|
||||
echo "Ошибка: Необходимо указать архитектуру (например, amd64 или riscv64)."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ARCH="$1"
|
||||
TARGET_DIR="release/${ARCH}"
|
||||
CONTAINER_NAME="e-monitor"
|
||||
|
||||
SUPPORTED_ARCHS=("amd64" "riscv64")
|
||||
if [[ ! " ${SUPPORTED_ARCHS[@]} " =~ " ${ARCH} " ]]; then
|
||||
echo "Ошибка: Неизвестная архитектура $ARCH. Допустимые значения: ${SUPPORTED_ARCHS[*]}."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# На случай, если контейнер с таким именем уже существует
|
||||
docker stop e-monitor && docker rm e-monitor
|
||||
|
||||
echo "Building Docker image..."
|
||||
|
||||
docker build --network=host -t e-monitor . || {
|
||||
echo "Ошибка: Не удалось построить Docker-образ."
|
||||
exit 1
|
||||
}
|
||||
|
||||
echo "Running Docker container..."
|
||||
|
||||
docker run --name "$CONTAINER_NAME" --dns 8.8.8.8 --network=host e-monitor:latest || {
|
||||
echo "Ошибка: Не удалось запустить Docker-контейнер."
|
||||
exit 1
|
||||
}
|
||||
|
||||
echo "Creating target directory: $TARGET_DIR"
|
||||
mkdir -p "$TARGET_DIR"
|
||||
|
||||
case "$ARCH" in
|
||||
amd64)
|
||||
echo "Copying binaries for architecture: amd64"
|
||||
docker cp "$CONTAINER_NAME:/usr/src/kii/target/x86_64-unknown-linux-gnu/release/noxis-cli" "$TARGET_DIR/" || {
|
||||
echo "Ошибка: Не удалось скопировать noxis-cli для amd64."
|
||||
docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"
|
||||
exit 1
|
||||
}
|
||||
docker cp "$CONTAINER_NAME:/usr/src/kii/target/x86_64-unknown-linux-gnu/release/noxis-rs" "$TARGET_DIR/" || {
|
||||
echo "Ошибка: Не удалось скопировать noxis-rs для amd64."
|
||||
docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"
|
||||
exit 1
|
||||
}
|
||||
;;
|
||||
riscv64)
|
||||
echo "Copying binaries for architecture: riscv64"
|
||||
docker cp "$CONTAINER_NAME:/usr/src/kii/target/riscv64gc-unknown-linux-gnu/release/noxis-cli" "$TARGET_DIR/" || {
|
||||
echo "Ошибка: Не удалось скопировать noxis-cli для riscv64."
|
||||
docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"
|
||||
exit 1
|
||||
}
|
||||
docker cp "$CONTAINER_NAME:/usr/src/kii/target/riscv64gc-unknown-linux-gnu/release/noxis-rs" "$TARGET_DIR/" || {
|
||||
echo "Ошибка: Не удалось скопировать noxis-rs для riscv64."
|
||||
docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"
|
||||
exit 1
|
||||
}
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "Stopping and removing Docker container..."
|
||||
docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"
|
||||
|
||||
echo "Build and extraction completed successfully for architecture: $ARCH"
|
||||
exit 0
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
[package]
|
||||
name = "noxis-cli"
|
||||
version = "0.2.4"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.94"
|
||||
clap = { version = "4.5.22", features = ["derive"] }
|
||||
serde = { version = "1.0.215", features = ["derive"] }
|
||||
serde_json = "1.0.133"
|
||||
thiserror = "2.0.11"
|
||||
tokio = { version = "1.42.0", features = ["full", "net"] }
|
||||
|
|
@ -0,0 +1,145 @@
|
|||
use clap::{Parser, Subcommand};
|
||||
|
||||
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
|
||||
pub struct Cli {
|
||||
#[command(
|
||||
subcommand,
|
||||
help = "to manage Noxis work",
|
||||
)]
|
||||
command : Commands,
|
||||
}
|
||||
|
||||
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
|
||||
pub enum Commands {
|
||||
#[command(
|
||||
about = "To get info about current Noxis status",
|
||||
)]
|
||||
Status,
|
||||
#[command(
|
||||
about = "To start Noxis process",
|
||||
)]
|
||||
Start(StartAction),
|
||||
#[command(
|
||||
about = "To stop Noxis process",
|
||||
)]
|
||||
Stop,
|
||||
#[command(
|
||||
about = "To restart Noxis process",
|
||||
)]
|
||||
Restart(StartAction),
|
||||
#[command(
|
||||
about = "To get list of processes that are being monitoring",
|
||||
)]
|
||||
Processes,
|
||||
// process command
|
||||
#[command(
|
||||
about = "To manage current process that is being monitoring",
|
||||
)]
|
||||
Process(ProcessCommand),
|
||||
// config command =
|
||||
#[command(
|
||||
about = "To manage config settings",
|
||||
)]
|
||||
Config(ConfigCommand),
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
|
||||
pub struct StartAction {
|
||||
#[arg(
|
||||
long="with-flags",
|
||||
num_args = 1..,
|
||||
value_delimiter = ' '
|
||||
)]
|
||||
flags : Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
|
||||
pub struct ConfigCommand {
|
||||
#[command(subcommand)]
|
||||
action : ConfigAction,
|
||||
}
|
||||
|
||||
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
|
||||
pub enum ConfigAction {
|
||||
#[command(
|
||||
about = "To change current Noxis configuration",
|
||||
)]
|
||||
Local(LocalConfig),
|
||||
#[command(
|
||||
about = "To change credentials of the remote config server",
|
||||
)]
|
||||
Remote,
|
||||
#[command(
|
||||
about = "To reset all config settings",
|
||||
)]
|
||||
Reset,
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
|
||||
pub struct LocalConfig {
|
||||
// flag
|
||||
#[arg(
|
||||
long = "json",
|
||||
action,
|
||||
help = "to read following input as JSON",
|
||||
)]
|
||||
is_json : bool,
|
||||
// value
|
||||
#[arg(
|
||||
help = "path to config file or config String (with --json flag)",
|
||||
)]
|
||||
config : String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
|
||||
pub struct ProcessCommand {
|
||||
#[arg(
|
||||
help = "name of needed process",
|
||||
)]
|
||||
process : String,
|
||||
#[command(
|
||||
subcommand,
|
||||
help = "To get current process's status",
|
||||
)]
|
||||
action : ProcessAction,
|
||||
}
|
||||
|
||||
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
|
||||
enum ProcessAction {
|
||||
#[command(
|
||||
about = "To get info about current process status",
|
||||
)]
|
||||
Status,
|
||||
#[command(
|
||||
about = "To start current process",
|
||||
)]
|
||||
Start,
|
||||
#[command(
|
||||
about = "To stop current process",
|
||||
)]
|
||||
Stop,
|
||||
#[command(
|
||||
about = "To freeze (hybernaze) current process",
|
||||
)]
|
||||
Freeze,
|
||||
#[command(
|
||||
about = "To unfreeze (unhybernaze) current process",
|
||||
)]
|
||||
Unfreeze,
|
||||
#[command(
|
||||
about = "To restart current process",
|
||||
)]
|
||||
Restart,
|
||||
#[command(
|
||||
about = "To get info about current process's dependencies",
|
||||
)]
|
||||
Deps,
|
||||
#[command(
|
||||
about = "To get info about current process's files-dependencies",
|
||||
)]
|
||||
Files,
|
||||
#[command(
|
||||
about = "To get info about current process's services-dependencies",
|
||||
)]
|
||||
Services,
|
||||
}
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
use thiserror::Error;
|
||||
use super::cli_net::NOXIS_RS_CREDS;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum NoxisCliError {
|
||||
#[error("Can't send any data to {:?}. Noxis-rs daemon is disabled or can't be accessed", NOXIS_RS_CREDS)]
|
||||
NoxisDaemonMissing,
|
||||
#[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's web-functionality")]
|
||||
PortIsNotWritable,
|
||||
#[error("Can't send Cli-prompt to the Noxis-rs. Check it's state")]
|
||||
CliPromptCanNotBeSent,
|
||||
#[error("Can't parse CLI struct and send as byte stream")]
|
||||
ToStringCliParsingParsing,
|
||||
}
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
use tokio::net::TcpStream;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::time::{Duration, sleep};
|
||||
use anyhow::Result;
|
||||
use super::Cli;
|
||||
use super::cli_error::NoxisCliError;
|
||||
|
||||
pub const NOXIS_RS_CREDS: &str = "127.0.0.1:7753";
|
||||
|
||||
|
||||
pub async fn create_tcp_stream() -> Result<TcpStream> {
|
||||
Ok(TcpStream::connect(NOXIS_RS_CREDS).await.map_err(|_| NoxisCliError::NoxisDaemonMissing)?)
|
||||
}
|
||||
|
||||
pub async fn try_send(stream: Result<TcpStream>, params: Cli) -> Result<()> {
|
||||
use serde_json::to_string;
|
||||
let mut stream = stream.map_err(|_| NoxisCliError::NoxisDaemonMissing)?;
|
||||
loop {
|
||||
if stream.writable().await.is_err() {
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
continue;
|
||||
}
|
||||
// let msg: Cli = from_str(&format!("{:?}", params))?;
|
||||
let msg= to_string(¶ms).map_err(|_| NoxisCliError::ToStringCliParsingParsing)?;
|
||||
// let msg = r"HTTP/1.1 POST\r\nContent-Length: 14\r\nContent-Type: text/plain\r\n\r\nHello, World!@";
|
||||
|
||||
stream.write_all(msg.as_bytes()).await.map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?;
|
||||
// ...
|
||||
break;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
mod cli;
|
||||
mod cli_net;
|
||||
mod cli_error;
|
||||
|
||||
pub use cli::*;
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
mod cli;
|
||||
mod cli_net;
|
||||
mod cli_error;
|
||||
|
||||
use clap::Parser;
|
||||
use cli::Cli;
|
||||
use cli_net::{create_tcp_stream, try_send};
|
||||
use anyhow::Result;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()>{
|
||||
let cli = Cli::parse();
|
||||
try_send(create_tcp_stream().await, cli).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
[package]
|
||||
name = "noxis-rs"
|
||||
version = "0.11.10"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.93"
|
||||
chrono = "0.4.38"
|
||||
clap = { version = "4.5.21", features = ["derive"] }
|
||||
env_logger = "0.11.3"
|
||||
inotify = "0.10.2"
|
||||
log = "0.4.22"
|
||||
pcap = "2.2.0"
|
||||
redis = "0.25.4"
|
||||
serde = { version = "1.0.203", features = ["derive"] }
|
||||
serde_json = "1.0.118"
|
||||
sysinfo = "0.32.0"
|
||||
tokio = { version = "1.38.0", features = ["full", "time"] }
|
||||
noxis-cli = { path = "../noxis-cli" }
|
||||
dotenv = "0.15.0"
|
||||
|
|
@ -4,12 +4,12 @@
|
|||
"processes": [
|
||||
{
|
||||
"name": "temp-process",
|
||||
"path": "/home/user/monitor/runner-rs/temp-process",
|
||||
"path": "./temp-process",
|
||||
"dependencies": {
|
||||
"files": [
|
||||
{
|
||||
"filename": "dep-file",
|
||||
"src": "/home/user/monitor/runner-rs/tests/examples/",
|
||||
"src": "./tests/examples/",
|
||||
"triggers": {
|
||||
"onDelete": "stop",
|
||||
"onChange": "stay"
|
||||
|
|
@ -31,4 +31,3 @@
|
|||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
|
@ -1,27 +1,33 @@
|
|||
mod options;
|
||||
mod utils;
|
||||
|
||||
use anyhow::Error;
|
||||
use clap::Parser;
|
||||
use log::{error, info};
|
||||
use options::config::*;
|
||||
use options::logger::setup_logger;
|
||||
use options::signals::set_valid_destructor;
|
||||
use options::structs::*;
|
||||
use options::structs::Processes;
|
||||
use options::cli_pipeline::init_cli_pipeline;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::mpsc;
|
||||
use utils::*;
|
||||
use options::preboot::PrebootParams;
|
||||
|
||||
#[tokio::main(flavor = "multi_thread")]
|
||||
async fn main() {
|
||||
async fn main() -> anyhow::Result<()>{
|
||||
let preboot = Arc::new(PrebootParams::parse().validate()?);
|
||||
|
||||
let _ = setup_logger();
|
||||
|
||||
info!("Runner is configurating...");
|
||||
|
||||
// setting up redis connection \
|
||||
// then conf checks to choose the most actual \
|
||||
let processes: Processes = get_actual_config().await.unwrap_or_else(|| {
|
||||
let processes: Processes = get_actual_config(preboot.clone()).await.unwrap_or_else(|| {
|
||||
error!("No actual configuration for runner. Stopping...");
|
||||
std::process::exit(101);
|
||||
std::process::exit(1);
|
||||
});
|
||||
|
||||
info!(
|
||||
|
|
@ -32,7 +38,7 @@ async fn main() {
|
|||
|
||||
if processes.processes.is_empty() {
|
||||
error!("Processes list is null, runner-rs initialization is stopped");
|
||||
return;
|
||||
return Err(Error::msg("Empty processes segment in config"));
|
||||
}
|
||||
let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![];
|
||||
// is in need to send to the signals handler thread
|
||||
|
|
@ -75,13 +81,18 @@ async fn main() {
|
|||
|
||||
// remote config update subscription
|
||||
handler.push(tokio::spawn(async move {
|
||||
let _ = subscribe_config_stream(Arc::new(processes)).await;
|
||||
let _ = subscribe_config_stream(Arc::new(processes), preboot.clone()).await;
|
||||
}));
|
||||
|
||||
// cli pipeline
|
||||
handler.push(tokio::spawn(async move {
|
||||
let _ = init_cli_pipeline().await;
|
||||
}));
|
||||
|
||||
for i in handler {
|
||||
let _ = i.await;
|
||||
}
|
||||
return;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// todo: integration tests
|
||||
|
|
@ -4,3 +4,5 @@ pub mod config;
|
|||
pub mod logger;
|
||||
pub mod signals;
|
||||
pub mod structs;
|
||||
pub mod preboot;
|
||||
pub mod cli_pipeline;
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
use log::{error, info, warn};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use anyhow::{Result as DynResult, Error};
|
||||
use tokio::time::{sleep, Duration};
|
||||
use std::{borrow::BorrowMut, net::{IpAddr, Ipv4Addr}};
|
||||
// use std::io::BufReader;
|
||||
use tokio::io::{BufReader, AsyncWriteExt, AsyncBufReadExt};
|
||||
use noxis_cli::Cli;
|
||||
use serde_json::from_str;
|
||||
|
||||
/// # Fn `init_cli_pipeline`
|
||||
/// ## for catching all input requests from CLI
|
||||
///
|
||||
/// *input* : -
|
||||
///
|
||||
/// *output* : `anyhow::Result<()>` to wrap errors
|
||||
///
|
||||
/// *initiator* : fn `main`
|
||||
///
|
||||
/// *managing* : `TcpListener` object to handle requests
|
||||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
pub async fn init_cli_pipeline() -> DynResult<()> {
|
||||
match init_listener().await {
|
||||
Some(list) => {
|
||||
loop {
|
||||
if let Ok((socket, addr)) = list.accept().await {
|
||||
// isolation
|
||||
if IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)) != addr.ip() {
|
||||
warn!("Declined attempt to connect TCP-socket from {}", addr);
|
||||
continue;
|
||||
}
|
||||
process_connection(socket).await;
|
||||
}
|
||||
sleep(Duration::from_millis(500)).await;
|
||||
}
|
||||
// Ok(())
|
||||
},
|
||||
None => Err(Error::msg("Addr 127.0.0.1:7753 is already in use"))
|
||||
}
|
||||
}
|
||||
|
||||
/// # Fn `init_listener`
|
||||
/// ## for creating TCP-listener for communicating with CLI
|
||||
///
|
||||
/// *input* : -
|
||||
///
|
||||
/// *output* : `Some<TcpListener>` if port 7753 was opened | None if not
|
||||
///
|
||||
/// *initiator* : fn `init_cli_pipeline`
|
||||
///
|
||||
/// *managing* : `TcpListener` object to handle requests
|
||||
///
|
||||
/// *depends on* : `tokio::net::TcpListener`
|
||||
///
|
||||
async fn init_listener() -> Option<TcpListener> {
|
||||
match TcpListener::bind("127.0.0.1:7753").await {
|
||||
Ok(listener) => {
|
||||
info!("Runner is listening localhost:7753");
|
||||
Some(listener)
|
||||
},
|
||||
Err(_) => {
|
||||
error!("Cannot create TCP listener for CLI");
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// # Fn `process_connection`
|
||||
/// ## for processing input CLI requests
|
||||
///
|
||||
/// *input* : mut stream: `TcpStream`
|
||||
///
|
||||
/// *output* : -
|
||||
///
|
||||
/// *initiator* : fn `init_cli_pipeline`
|
||||
///
|
||||
/// *managing* : mutable object of `TcpStream`
|
||||
///
|
||||
/// *depends on* : `tokio::net::TcpStream`
|
||||
///
|
||||
async fn process_connection(mut stream: TcpStream) {
|
||||
let buf_reader = BufReader::new(stream.borrow_mut());
|
||||
let mut rqst = buf_reader.lines();
|
||||
|
||||
|
||||
while let Ok(Some(line)) = rqst.next_line().await {
|
||||
if line.is_empty() {
|
||||
break
|
||||
}
|
||||
match from_str::<Cli>(&line) {
|
||||
Ok(req) => {
|
||||
// TODO: func wrapper
|
||||
dbg!(req);
|
||||
},
|
||||
Err(_) => {
|
||||
break
|
||||
},
|
||||
}
|
||||
println!("{}", line);
|
||||
}
|
||||
|
||||
let response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\nContent-Type: text/plain\r\n\r\nHello, World!";
|
||||
stream.write_all(response.as_bytes()).await.unwrap();
|
||||
}
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
use crate::options::structs::*;
|
||||
use super::structs::*;
|
||||
use log::{error, info, warn};
|
||||
use redis::{Client, Connection};
|
||||
use std::fs::OpenOptions;
|
||||
|
|
@ -7,9 +7,10 @@ use std::os::unix::process::CommandExt;
|
|||
use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
use std::{env, fs};
|
||||
use tokio::time::Duration;
|
||||
use super::preboot::PrebootParams;
|
||||
use tokio::time::{Duration, sleep};
|
||||
|
||||
const CONFIG_PATH: &str = "settings.json";
|
||||
// const CONFIG_PATH: &str = "settings.json";
|
||||
|
||||
/// # Fn `load_processes`
|
||||
/// ## for reading and parsing *local* storing config
|
||||
|
|
@ -46,43 +47,51 @@ fn load_processes(json_filename: &str) -> Option<Processes> {
|
|||
///
|
||||
/// *depends on* : struct `Processes`
|
||||
///
|
||||
pub async fn get_actual_config() -> Option<Processes> {
|
||||
pub async fn get_actual_config(params : Arc<PrebootParams>) -> Option<Processes> {
|
||||
// * if no local conf -> loop and +inf getting conf from redis server
|
||||
// * if local conf -> once getting conf from redis server
|
||||
match load_processes(CONFIG_PATH) {
|
||||
let config_path = params.config.to_str().unwrap_or_else(|| {
|
||||
error!("Invalid character in config file. Config path was set to default");
|
||||
"settings.json"
|
||||
});
|
||||
info!("Configurating config module with params: no-remote-config={}, no-sub={}, local config path={:?}, remote server={}", params.no_remote_config, params.no_sub, params.config, params.remote_server_url);
|
||||
match load_processes(config_path) {
|
||||
Some(local_conf) => {
|
||||
info!(
|
||||
"Found local configuration, version - {}",
|
||||
&local_conf.date_of_creation
|
||||
);
|
||||
if let Some(remote_conf) =
|
||||
// TODO : rework with pubsub mech
|
||||
once_get_remote_configuration(&format!("redis://{}/", local_conf.config_server))
|
||||
{
|
||||
return match config_comparing(&local_conf, &remote_conf) {
|
||||
ConfigActuality::Local => {
|
||||
info!("Local config is actual");
|
||||
Some(local_conf)
|
||||
}
|
||||
ConfigActuality::Remote => {
|
||||
info!("Pulled config is more actual. Saving changes!");
|
||||
if save_new_config(&remote_conf, CONFIG_PATH).is_err() {
|
||||
error!("Saving changes process failed due to unexpected error...")
|
||||
if !params.no_remote_config {
|
||||
if let Some(remote_conf) =
|
||||
// TODO : rework with pubsub mech
|
||||
once_get_remote_configuration(&format!("redis://{}/", ¶ms.remote_server_url))
|
||||
{
|
||||
return match config_comparing(&local_conf, &remote_conf) {
|
||||
ConfigActuality::Local => {
|
||||
info!("Local config is actual");
|
||||
Some(local_conf)
|
||||
}
|
||||
Some(remote_conf)
|
||||
}
|
||||
};
|
||||
ConfigActuality::Remote => {
|
||||
info!("Pulled config is more actual. Saving changes!");
|
||||
if save_new_config(&remote_conf, config_path).is_err() {
|
||||
error!("Saving changes process failed due to unexpected error...")
|
||||
}
|
||||
Some(remote_conf)
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
Some(local_conf)
|
||||
}
|
||||
None => {
|
||||
warn!("No local valid conf was found. Trying to pull remote one...");
|
||||
let mut conn = get_connection_watcher(&open_watcher("redis://localhost/"));
|
||||
let remote_config = get_remote_conf_watcher(&mut conn).await;
|
||||
if let Some(conf) = remote_config {
|
||||
info!("Config {} was pulled from Redis-Server. Starting...", &conf.date_of_creation);
|
||||
let _ = save_new_config(&conf, CONFIG_PATH);
|
||||
return Some(conf);
|
||||
if !params.no_remote_config {
|
||||
let mut conn = get_connection_watcher(&open_watcher(&format!("redis://{}/", ¶ms.remote_server_url)));
|
||||
if let Some(conf) = get_remote_conf_watcher(&mut conn).await {
|
||||
info!("Config {} was pulled from Redis-Server. Starting...", &conf.date_of_creation);
|
||||
let _ = save_new_config(&conf, config_path);
|
||||
return Some(conf);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
|
@ -182,23 +191,22 @@ fn once_get_remote_configuration(serv_info: &str) -> Option<Processes> {
|
|||
if remote.is_none() {
|
||||
error!("Pulled config is invalid. Check it in Redis Server");
|
||||
}
|
||||
return remote;
|
||||
remote
|
||||
},
|
||||
Err(_) => {
|
||||
error!("Cannot extract payload from new message. Check Redis Server state");
|
||||
return None;
|
||||
None
|
||||
},
|
||||
}
|
||||
},
|
||||
Err(_) => {
|
||||
warn!("Cannot get config from Redis Server. Empty channel");
|
||||
return None;
|
||||
None
|
||||
},
|
||||
}
|
||||
},
|
||||
Err(_) => {
|
||||
error!("Redis subscription process failed. Check Redis configuration!");
|
||||
return None;
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -311,8 +319,13 @@ fn restart_main_thread() -> std::io::Result<()> {
|
|||
///
|
||||
/// *depends on* : `Processes`
|
||||
///
|
||||
pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(), CustomError> {
|
||||
if let Ok(client) = Client::open(format!("redis://{}/", &actual_prcs.config_server)) {
|
||||
pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<PrebootParams>) -> Result<(), CustomError> {
|
||||
let config_path = params.config.to_str().unwrap_or_else(|| "settings.json");
|
||||
|
||||
if params.no_sub || params.no_remote_config {
|
||||
return Err(CustomError::Fatal);
|
||||
}
|
||||
if let Ok(client) = Client::open(format!("redis://{}/", ¶ms.remote_server_url)) {
|
||||
if let Ok(mut conn) = client.get_connection() {
|
||||
match crate::utils::get_container_id() {
|
||||
Some(channel_name) => {
|
||||
|
|
@ -322,7 +335,6 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(),
|
|||
info!("Runner subscribed on config update publishing in channel {}", &channel_name);
|
||||
loop {
|
||||
if let Ok(msg) = pubsub.get_message() {
|
||||
info!("New config was pulled from Redis Server");
|
||||
let get_remote_config: Result<String, redis::RedisError> = msg.get_payload();
|
||||
match get_remote_config {
|
||||
Ok(payload) => {
|
||||
|
|
@ -330,8 +342,8 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(),
|
|||
match config_comparing(&actual_prcs, &remote_config) {
|
||||
ConfigActuality::Remote => {
|
||||
warn!("Pulled config is actual. Saving and restarting...");
|
||||
if save_new_config(&remote_config, CONFIG_PATH).is_err() {
|
||||
error!("Error with saving new config to {}. Stopping sub mechanism...", &CONFIG_PATH);
|
||||
if save_new_config(&remote_config, config_path).is_err() {
|
||||
error!("Error with saving new config to {}. Stopping sub mechanism...", config_path);
|
||||
return Err(CustomError::Fatal);
|
||||
}
|
||||
if restart_main_thread().is_err() {
|
||||
|
|
@ -339,7 +351,10 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(),
|
|||
return Err(CustomError::Fatal);
|
||||
}
|
||||
}
|
||||
_ => continue,
|
||||
_ => {
|
||||
warn!("Pulled new config. Current config is more actual ...");
|
||||
continue
|
||||
},
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
|
@ -352,7 +367,7 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(),
|
|||
},
|
||||
}
|
||||
}
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(30)).await;
|
||||
sleep(Duration::from_secs(30)).await;
|
||||
}
|
||||
} else {
|
||||
error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name);
|
||||
|
|
@ -433,7 +448,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr
|
|||
Err(_) => Err(CustomError::Fatal),
|
||||
}
|
||||
}
|
||||
Err(_) => return Err(CustomError::Fatal),
|
||||
Err(_) => Err(CustomError::Fatal),
|
||||
}
|
||||
}
|
||||
Err(_) => Err(CustomError::Fatal),
|
||||
|
|
@ -61,8 +61,27 @@ pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> {
|
|||
#[cfg(test)]
|
||||
mod logger_tests {
|
||||
use super::*;
|
||||
// #[test]
|
||||
// fn setting_up_logger() {
|
||||
// assert!(setup_logger().is_ok());
|
||||
// }
|
||||
|
||||
#[test]
|
||||
fn setting_up_logger() {
|
||||
assert!(setup_logger().is_ok());
|
||||
Builder::new()
|
||||
.format(move |buf, record| {
|
||||
writeln!(
|
||||
buf,
|
||||
"|{}| {} [{}] - {}",
|
||||
get_container_id().unwrap_or("NODE".to_string()).trim(),
|
||||
Local::now().format("%d-%m-%Y %H:%M:%S"),
|
||||
record.level(),
|
||||
record.args(),
|
||||
)
|
||||
})
|
||||
.filter(None, LevelFilter::Info)
|
||||
.target(env_logger::Target::Stdout)
|
||||
.is_test(true)
|
||||
.init();
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,382 @@
|
|||
// module to handle pre-boot params of the monitor
|
||||
#[allow(unused_imports)]
|
||||
use anyhow::{Result, Ok, Error};
|
||||
use clap::Parser;
|
||||
use std::path::PathBuf;
|
||||
use std::env::var;
|
||||
use dotenv::dotenv;
|
||||
|
||||
const SOCKET_PATH: &str = "/var/run/enode/hostagent.sock";
|
||||
|
||||
/// Preboot flags/params mirrored into process environment variables
/// (written by [`EnvVars::setup`], one variant per `PrebootParams` field).
enum EnvVars {
    NoxisNoHagent,
    NoxisNoLogs,
    NoxisRefreshLogs,
    NoxisNoRemoteConfig,
    NoxisNoConfigSub,
    NoxisSocketPath,
    NoxisLogTo,
    NoxisRemoteServerUrl,
    NoxisConfig,
    NoxisMetrics,
}
|
||||
|
||||
///
|
||||
impl std::fmt::Display for EnvVars {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match self {
|
||||
EnvVars::NoxisNoHagent => write!(f, "NOXIS_NO_HAGENT"),
|
||||
EnvVars::NoxisNoLogs => write!(f, "NOXIS_NO_LOGS"),
|
||||
EnvVars::NoxisRefreshLogs => write!(f, "NOXIS_REFRESH_LOGS"),
|
||||
EnvVars::NoxisNoRemoteConfig => write!(f, "NOXIS_NO_REMOTE_CONFIG"),
|
||||
EnvVars::NoxisNoConfigSub => write!(f, "NOXIS_NO_CONFIG_SUB"),
|
||||
EnvVars::NoxisSocketPath => write!(f, "NOXIS_SOCKET_PATH"),
|
||||
EnvVars::NoxisLogTo => write!(f, "NOXIS_LOG_TO"),
|
||||
EnvVars::NoxisRemoteServerUrl => write!(f, "NOXIS_REMOTE_SERVER_URL"),
|
||||
EnvVars::NoxisConfig => write!(f, "NOXIS_CONFIG"),
|
||||
EnvVars::NoxisMetrics => write!(f, "NOXIS_METRICS"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Helpers tying each `EnvVars` variant to a built-in default value and
/// synchronising the process environment with the parsed preboot params.
impl<'a> EnvVars {
    // Default trait func is not satisfying this issue
    /// Built-in fallback value for each variable.
    // NOTE(review): these mostly match the clap defaults, but `NoxisConfig`
    // is "./settings.json" here vs clap's "settings.json" — confirm which
    // one is authoritative.
    fn default(self) -> &'a str {
        match self {
            EnvVars::NoxisNoHagent => "false",
            EnvVars::NoxisNoLogs => "false",
            EnvVars::NoxisRefreshLogs => "false",
            EnvVars::NoxisNoRemoteConfig => "false",
            EnvVars::NoxisNoConfigSub => "false",
            EnvVars::NoxisSocketPath => "/var/run/enode/hostagent.sock",
            EnvVars::NoxisLogTo => "./",
            EnvVars::NoxisRemoteServerUrl => "localhost",
            EnvVars::NoxisConfig => "./settings.json",
            EnvVars::NoxisMetrics => "full",
        }
    }
    /// Ensures the env var for `self` exists: if unset, stores
    /// `preboot_value`; if set but different from `preboot_value`, it is
    /// overwritten with the built-in default.
    // NOTE(review): overwriting with `self.default()` rather than with
    // `preboot_value` looks suspicious given the "save preboot states"
    // comment in `setup` — confirm the intended precedence.
    fn process_env_var(self, preboot_value: &str) {
        // let default = self.default();
        match var(self.to_string()) {
            std::result::Result::Ok(val) => {
                if val != preboot_value {
                    std::env::set_var(self.to_string(), self.default());
                }
            },
            Err(_) => {
                std::env::set_var(self.to_string(), preboot_value);
            },
        }
    }
    /// Mirrors every field of `preboot` into its corresponding env var.
    pub fn setup(preboot: &PrebootParams) {
        // setup default if not exists
        // check values and save preboot states in env vars if not equal

        Self::NoxisNoHagent.process_env_var(&preboot.no_hostagent.to_string());
        Self::NoxisNoLogs.process_env_var(&preboot.no_logs.to_string());
        Self::NoxisRefreshLogs.process_env_var(&preboot.refresh_logs.to_string());
        Self::NoxisNoRemoteConfig.process_env_var(&preboot.no_remote_config.to_string());
        Self::NoxisNoConfigSub.process_env_var(&preboot.no_sub.to_string());
        // NOTE(review): to_str().unwrap() panics on non-UTF-8 paths — TODO confirm
        Self::NoxisSocketPath.process_env_var(preboot.socket_path.to_str().unwrap());
        Self::NoxisLogTo.process_env_var(preboot.log_to.to_str().unwrap());
        Self::NoxisRemoteServerUrl.process_env_var(&preboot.remote_server_url);
        Self::NoxisConfig.process_env_var(preboot.config.to_str().unwrap());
        Self::NoxisMetrics.process_env_var(&preboot.metrics.to_string());

    }
}
|
||||
|
||||
/// # Enum `MetricsPrebootParams`
|
||||
/// ## for setting up metrics mode as preboot param from command prompt
|
||||
///
|
||||
/// examples:
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --metrics full
|
||||
/// noxis-rs ... --metrics system
|
||||
/// noxis-rs ... --metrics processes
|
||||
/// noxis-rs ... --metrics net
|
||||
/// noxis-rs ... --metrics none
|
||||
/// ```
|
||||
///
|
||||
#[derive(clap::ValueEnum, Debug, Clone)]
pub enum MetricsPrebootParams {
    // all metric categories (the clap default, see `PrebootParams::metrics`)
    Full,
    // system-level metrics only
    System,
    // per-process metrics only
    Processes,
    // network metrics only
    Net,
    // metrics collection disabled
    None,
}
|
||||
|
||||
/// # `std::fmt::Display` implementation for `MetricsPrebootParams`
|
||||
/// ## to enable parsing object to String
|
||||
impl std::fmt::Display for MetricsPrebootParams {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match self {
|
||||
MetricsPrebootParams::Full => write!(f, "full"),
|
||||
MetricsPrebootParams::System => write!(f, "system"),
|
||||
MetricsPrebootParams::Processes => write!(f, "processes"),
|
||||
MetricsPrebootParams::Net => write!(f, "net"),
|
||||
MetricsPrebootParams::None => write!(f, "none"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// # struct `PrebootParams`
|
||||
/// ## to parse and set up all modes as preboot params from command prompt
|
||||
///
|
||||
/// ### args :
|
||||
///
|
||||
/// `--no-hagent` - to disable hagent work module and set up work mode as autonomous
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --no-hagent ...
|
||||
/// ```
|
||||
///
|
||||
///
|
||||
/// `--no-logs` - to disable logging at all
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --no-logs ...
|
||||
/// ```
|
||||
///
|
||||
/// `--refresh-logs` - to truncate logs directory
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --refresh-logs ...
|
||||
/// ```
|
||||
///
|
||||
/// `--no-remote-config` - to disable work with Redis as config producer
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --no-remote-config ...
|
||||
/// ```
|
||||
///
|
||||
/// `--no-sub` - to disable Redis subscribtion mechanism
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --no-sub ...
|
||||
/// ```
|
||||
///
|
||||
/// `--socket-path` - to set Unix Domain Socket file's directory
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --socket-path /var/run/enode/hostagent.sock ...
|
||||
/// ```
|
||||
///
|
||||
/// `--log-to` - to set directory for logs
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --log-to /dir/to/logs/ ...
|
||||
/// ```
|
||||
///
|
||||
/// `--remote-server-url` - to set Redis Server
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --remote-server-url 192.168.28.12 ...
|
||||
/// ```
|
||||
///
|
||||
/// `--config` - to set Noxis' config full path
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --config /etc/enode/settings.json ...
|
||||
/// ```
|
||||
///
|
||||
/// `--metrics` - to set metrics mode
|
||||
/// ### usage :
|
||||
/// ``` bash
|
||||
/// noxis-rs ... --metrics full ...
|
||||
/// ```
|
||||
#[derive(Debug, Parser)]
pub struct PrebootParams {
    // actions
    // disables the host-agent module (autonomous mode); mutually exclusive
    // with --socket-path
    #[arg(
        long = "no-hagent",
        action,
        conflicts_with="socket_path",
        help="To disable work with host-agent"
    )]
    pub no_hostagent : bool,
    // disables logging entirely; mutually exclusive with --log-to
    #[arg(
        long = "no-logs",
        action,
        conflicts_with="log_to",
        help="To disable logs"
    )]
    pub no_logs: bool,
    // truncates the logs directory on start; pointless with --no-logs,
    // hence the conflict
    #[arg(
        long = "refresh-logs",
        action,
        conflicts_with="no_logs",
        help="To clear logs directory"
    )]
    pub refresh_logs : bool,
    // disables the remote (Redis) config mechanism entirely
    #[arg(
        long = "no-remote-config",
        action,
        help="To disable work with remote config server",
        conflicts_with="no_sub")]
    pub no_remote_config : bool,
    // disables only the config-update subscription; implied by
    // --no-remote-config, hence the conflict
    #[arg(
        long = "no-sub",
        action,
        help="To disable subscription mechanism",
        conflicts_with="no_remote_config")]
    pub no_sub : bool,

    // params (socket_path, log_to, remote_server_url, config)
    // Unix domain socket used to talk to the host-agent
    #[arg(
        long = "socket-path",
        default_value="/var/run/enode/hostagent.sock",
        conflicts_with="no_hostagent",
        help="To set .sock file's path used in communication with host-agent"
    )]
    pub socket_path : PathBuf,
    // directory logs are written to (default: current directory)
    #[arg(
        long = "log-to",
        default_value="./",
        conflicts_with="no_logs",
        help="To set a path to logs directory"
    )]
    pub log_to : PathBuf,
    // host of the remote (Redis) config server
    #[arg(
        long = "remote-server-url",
        default_value="localhost",
        conflicts_with="no_remote_config",
        help = "To set url of remote config server using in remote config pulling mechanism"
    )]
    pub remote_server_url : String,
    // local config file path (also -c)
    #[arg(
        long = "config",
        short,
        default_value="settings.json",
        help="To set local config file path"
    )]
    pub config : PathBuf,

    // value enum params (metrics)
    // metrics collection mode (also -m); see `MetricsPrebootParams`
    #[arg(
        long = "metrics",
        short,
        default_value_t=MetricsPrebootParams::Full,
        help="To set metrics grubbing mode"
    )]
    pub metrics: MetricsPrebootParams,
}
|
||||
|
||||
/// # implementation for `PrebootParams`
/// ## to enable validation mechanism: checks that the paths given on the
/// ## command line actually exist, degrades gracefully to defaults where
/// ## possible, and finally mirrors the effective params into env vars
impl PrebootParams {
    pub fn validate(mut self) -> Result<Self> {
        // load .env overrides before validating; a missing .env is fine
        dotenv().ok();
        // socket file must exist unless the host-agent module is disabled
        if !self.socket_path.exists() && !self.no_hostagent {
            if self.socket_path.to_string_lossy() == SOCKET_PATH {
                // the default socket is missing -> run without host-agent
                self.no_hostagent = true;
                eprintln!("Warning: Socket-file wasn't found. Working without hostagent module...");
            } else {
                // a custom socket is missing -> fall back to the default
                // socket, or disable the module if that one is missing too
                eprintln!("Warning: Socket-file wasn't found or Noxis can't read it. Socket-file was set to default");
                if !PathBuf::from(SOCKET_PATH).exists() {
                    self.no_hostagent = true;
                    eprintln!("Warning: Socket-file wasn't found. Working without hostagent module...");
                } else {
                    self.socket_path = PathBuf::from(SOCKET_PATH);
                }
            }
            // return Err(Error::msg("Socket-file not found or Noxis can't read it. Cannot start"));
        }
        // existing log dir
        if !self.log_to.exists() && !self.no_logs {
            eprintln!("Error: Log-Dir not found or Noxis can't read it. LogDir was set to default");
            self.log_to = PathBuf::from("./");
            // return Err(Error::msg("Log Directory Not Found or Noxis can't read it. Cannot start"));
        }
        // existing local config file; with remote config disabled a local
        // config (here or at /etc/settings.json) is mandatory
        if !self.config.exists() {
            eprintln!("Error: Invalid character in config file. Config path was set to default");
            let config = PathBuf::from("/etc/settings.json");
            if !config.exists() && self.no_remote_config {
                return Err(Error::msg("Noxis cannot run without config. Create local config or enable remote-config mechanism"));
            }
            // NOTE(review): falls back to "settings.json" even when the
            // /etc/settings.json probe above succeeded — confirm intended.
            self.config = PathBuf::from("settings.json");
            // return Err(Error::msg("Local Config Not Found or Noxis can't read it. Cannot start"));
        }
        // redis server check
        // mirror the effective params into the process environment
        EnvVars::setup(&self);
        Ok(self)
    }
}
|
||||
|
||||
|
||||
// unit tests of preboot params parsing mech
#[cfg(test)]
mod preboot_unitests{
    use super::*;

    // all-defaults invocation must parse
    #[test]
    fn parsing_zero_args() {
        assert!(PrebootParams::try_parse_from(vec!["runner-rs"]).is_ok())
    }
    // a custom socket path alone is valid
    #[test]
    fn parsing_hagent_valid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--socket-path", "/path/to/socket"
        ]).is_ok())
    }
    // --socket-path conflicts with --no-hagent
    #[test]
    fn parsing_hagent_invalid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--socket-path", "/path/to/socket",
            "--no-hagent"
        ]).is_err())
    }
    // a custom log directory alone is valid
    #[test]
    fn parsing_log_valid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--log-to", "/path/to/log/dir"
        ]).is_ok())
    }
    // --log-to conflicts with --no-logs
    // NOTE(review): "--log-to /path/to/log/dir" is passed as ONE token, so
    // clap rejects it as an unknown flag rather than via the conflict rule —
    // probably should be two tokens like the valid-args test; confirm.
    #[test]
    fn parsing_log_invalid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--log-to /path/to/log/dir",
            "--no-logs"
        ]).is_err())
    }
    // --no-sub together with a remote server url is valid
    #[test]
    fn parsing_config_valid_args() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--no-sub",
            "--remote-server-url", "redis://127.0.0.1"
        ]).is_ok())
    }
    // --no-remote-config conflicts with --no-sub
    #[test]
    fn parsing_config_invalid_args_noremote_nosub() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--no-remote-config", "--no-sub"
        ]).is_err())
    }
    // --no-remote-config conflicts with --remote-server-url
    #[test]
    fn parsing_config_invalid_args_noremote_remoteurl() {
        assert!(PrebootParams::try_parse_from(vec![
            "runner-rs",
            "--no-remote-config",
            "--remote-server-url", "redis://127.0.0.1"
        ]).is_err())
    }
    // every declared ValueEnum variant parses; anything else is rejected
    #[test]
    fn parsing_metrics_args_using_value_enum() {
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "full"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "system"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "processes"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "net"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "none"]).is_ok());
        assert!(PrebootParams::try_parse_from(vec!["runner-rs", "--metrics", "unusual_value"]).is_err());
    }
}
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
use crate::options::structs::CustomError;
|
||||
use super::structs::CustomError;
|
||||
use std::sync::Arc;
|
||||
use tokio::io;
|
||||
use tokio::sync::mpsc;
|
||||
|
|
@ -1,3 +1,5 @@
|
|||
#![allow(dead_code)]
|
||||
|
||||
use std::net::Ipv4Addr;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
|
|
@ -5,6 +7,7 @@ use serde::{Deserialize, Serialize};
|
|||
pub enum CustomError {
|
||||
Fatal,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum ConfigActuality {
|
||||
Local,
|
||||
|
|
@ -18,7 +21,7 @@ pub enum ConfigActuality {
|
|||
///
|
||||
/// *depends on* : `TrackingProcess`
|
||||
///
|
||||
/// ```
|
||||
/// ``` json
|
||||
/// {
|
||||
/// -> "dateOfCreation": "1721381809104",
|
||||
/// -> "configServer": "localhost",
|
||||
|
|
@ -44,7 +47,7 @@ pub struct Processes {
|
|||
///
|
||||
/// *depends on* : `Dependencies`
|
||||
///
|
||||
/// ```
|
||||
/// ``` json
|
||||
/// ...
|
||||
/// "processes": [
|
||||
/// -> {
|
||||
|
|
@ -69,7 +72,7 @@ pub struct TrackingProcess {
|
|||
///
|
||||
/// *depends on* : `Files`, `Services`
|
||||
///
|
||||
/// ```
|
||||
/// ``` json
|
||||
/// ...
|
||||
/// "path": "/home/user/monitor/runner-rs/temp-process",
|
||||
/// -> "dependencies": {
|
||||
|
|
@ -93,7 +96,7 @@ pub struct Dependencies {
|
|||
///
|
||||
/// *depends on* : `FileTriggers`
|
||||
///
|
||||
/// ```
|
||||
/// ``` json
|
||||
/// ...
|
||||
/// "files": [
|
||||
/// -> {
|
||||
|
|
@ -118,7 +121,7 @@ pub struct Files {
|
|||
///
|
||||
/// *depends on* : `ServiceTriggers`
|
||||
///
|
||||
/// ```
|
||||
/// ``` json
|
||||
/// ...
|
||||
/// "services": [
|
||||
/// -> {
|
||||
|
|
@ -143,7 +146,7 @@ pub struct Services {
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
/// ```
|
||||
/// ``` json
|
||||
/// ...
|
||||
/// "port": 443,
|
||||
/// -> "triggers": {
|
||||
|
|
@ -168,7 +171,7 @@ pub struct ServiceTriggers {
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
/// ```
|
||||
/// ``` json
|
||||
/// ...
|
||||
/// "src": "/home/user/monitor/runner-rs/tests/examples/",
|
||||
/// -> "triggers": {
|
||||
|
|
@ -0,0 +1,272 @@
|
|||
pub mod files;
|
||||
pub mod hagent;
|
||||
pub mod metrics;
|
||||
pub mod prcs;
|
||||
pub mod services;
|
||||
|
||||
// TODO : saving current flags state
|
||||
|
||||
use crate::options::structs::CustomError;
|
||||
use crate::options::structs::TrackingProcess;
|
||||
use files::create_watcher;
|
||||
use files::file_handler;
|
||||
use inotify::Inotify;
|
||||
use log::{error, warn};
|
||||
use prcs::{
|
||||
freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process,
|
||||
unfreeze_process,
|
||||
};
|
||||
use services::service_handler;
|
||||
use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
use tokio::join;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::Duration;
|
||||
|
||||
const GET_ID_CMD: &str = "hostname";
|
||||
|
||||
/// # Fn `run_daemons`
|
||||
/// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to given messages into channel
|
||||
///
|
||||
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `&mut mpsc::Receiver<u8>`,
|
||||
///
|
||||
/// *output* : ()
|
||||
///
|
||||
/// *initiator* : main thread
|
||||
///
|
||||
/// *managing* : Arc to current process struct, Arc to managing channel writer, mut ref to managing channel reader
|
||||
///
|
||||
/// *depends on* : all module `prcs`'s functions, fn `running_handler`, fn `utils::files::create_watcher`
|
||||
///
|
||||
/// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process
|
||||
///
|
||||
pub async fn run_daemons(
    proc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    rx: &mut mpsc::Receiver<u8>,
) {
    // creating watchers + ---buffers---
    // one inotify watcher per file dependency; a failed watcher creation is
    // reported via protocol symbol 121 (fatal, see process_protocol_symbol)
    let mut watchers: Vec<Inotify> = vec![];
    for file in proc.dependencies.files.clone().into_iter() {
        if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
            watchers.push(watcher);
        } else {
            let _ = tx.send(121).await;
        }
        // watchers.push(create_watcher(&file.filename, &file.src).await.unwrap());
    }
    // shared with running_handler, which may recreate individual watchers
    let watchers_clone: Arc<tokio::sync::Mutex<Vec<Inotify>>> =
        Arc::new(tokio::sync::Mutex::new(watchers));

    loop {
        let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone());
        // race one monitoring pass against the next protocol message
        tokio::select! {
            _ = run_hand => continue,
            _val = rx.recv() => {
                // NOTE(review): recv() returns None once all senders drop;
                // unwrap would then panic — presumably impossible because this
                // task holds `tx` itself, but TODO confirm.
                if process_protocol_symbol(proc.clone(), _val.unwrap()).await.is_err() {
                    // fatal protocol symbol: stop managing this process
                    return;
                }
            },
        }
        tokio::task::yield_now().await;
    }
}
|
||||
|
||||
/// Dispatches one control-protocol byte received on the managing channel to
/// the matching process action (terminate / freeze / restart / unfreeze).
///
/// *input* : `Arc<TrackingProcess>`, `u8` protocol symbol (see match arms)
///
/// *output* : Ok(()) to keep the daemon loop alive | Err(CustomError::Fatal)
/// when the managing loop must stop (symbols 3, 7, 101, 121)
///
/// *initiator* : fn `run_daemons`
///
/// Unknown symbols are deliberately ignored (`_ => {}`).
async fn process_protocol_symbol(proc: Arc<TrackingProcess>, val: u8) -> Result<(), CustomError>{
    match val {
        // 1 - File-dependency handling error -> terminating (after waiting)
        1 => {
            if is_active(&proc.name).await {
                error!("File-dependency handling error: Terminating {} process ..." , &proc.name);
                terminate_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
            // return;
        },
        // 2 - File-dependency handling error -> holding (after waiting)
        2 => {
            if !is_frozen(&proc.name).await {
                error!("File-dependency handling error: Freezing {} process ..." , &proc.name);
                freeze_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(100)).await;
            }
        },
        // 3 - Running process error
        3 => {
            error!("Error due to starting {} process", &proc.name);
            return Err(CustomError::Fatal)
        },
        // 4 - Timeout of waiting service-dependency -> staying (after waiting)
        4 => {
            // warn!("Timeout of waiting service-dependency: Ignoring on {} process ..." , &proc.name);
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 5 - Timeout of waiting service-dependency -> terminating (after waiting)
        5 => {
            if is_active(&proc.name).await {
                error!("Timeout of waiting service-dependency: Terminating {} process ..." , &proc.name);
                terminate_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
        },
        // 6 - Timeout of waiting service-dependency -> holding (after waiting)
        6 => {
            // println!("holding {}-{}", proc.name, is_active(&proc.name).await);
            if !is_frozen(&proc.name).await {
                error!("Timeout of waiting service-dependency: Freezing {} process ..." , &proc.name);
                freeze_process(&proc.name).await;
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        },
        // // 7 - File-dependency change -> terminating (after check)
        7 => {
            error!("File-dependency warning (file changed). Terminating {} process...", &proc.name);
            terminate_process(&proc.name).await;
            tokio::time::sleep(Duration::from_millis(100)).await;
            return Err(CustomError::Fatal)
        },
        // // 8 - File-dependency change -> restarting (after check)
        8 => {
            warn!("File-dependency warning (file changed). Restarting {} process...", &proc.name);
            let _ = restart_process(&proc.name, &proc.path).await;
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // // 9 - File-dependency change -> staying (after check)
        9 => {
            // no need to trash logs
            warn!("File-dependency warning (file changed). Ignoring event on {} process...", &proc.name);
            tokio::time::sleep(Duration::from_millis(100)).await;
        },

        // 10 - Process unfreaze call via file handler (or service handler)
        10 | 11 => {
            if is_frozen(&proc.name).await {
                warn!("Unfreezing process {} call...", &proc.name);
                unfreeze_process(&proc.name).await;
            }
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 11 - Process unfreaze call via service handler
        // 11 => {
        //     if is_frozen(&proc.name).await {
        //         warn!("Unfreezing process {} call...", &proc.name);
        //         unfreeze_process(&proc.name).await;
        //     }
        //     tokio::time::sleep(Duration::from_millis(100)).await;
        // },
        // 101 - Impermissible trigger values in JSON
        101 => {
            error!("Impermissible trigger values in JSON in {}'s block. Killing thread...", &proc.name);
            if is_active(&proc.name).await {
                terminate_process(&proc.name).await;
            }
            return Err(CustomError::Fatal)
        },
        //
        // 121 - Cannot create valid watcher for file dependency
        // todo : think about valid situation
        121 => {
            error!("Cannot create valid watcher for file dependency. Terminating {} process...", &proc.name);
            let _ = terminate_process(&proc.name).await;
            return Err(CustomError::Fatal)
        },
        // 111 - global thread termination with killing current child in a face
        // of a current process
        111 => {
            warn!("Terminating {}'s child processes...", &proc.name);
            match is_active(&proc.name).await {
                true => {
                    terminate_process(&proc.name).await;
                },
                false => {
                    log::info!("Process {} is already terminated!", proc.name);
                },
            }
        },
        _ => {},
    }
    Ok(())
}
|
||||
// check process status daemon
/// # Fn `running_handler`
/// ## func to async exec subjobs of checking process, services and files states
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `Arc<tokio::sync::Mutex<Vec<Inotify>>>`
///
/// *output* : ()
///
/// *initiator* : fn `run_daemons`
///
/// *managing* : Arc to current process struct, Arc to Mutex to list of file watchers
///
/// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}`
///
pub async fn running_handler(
    prc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>>,
) {
    // services and files check (once)
    // both dependency checks run concurrently via join!
    let files_check = file_handler(
        &prc.name,
        &prc.dependencies.files,
        tx.clone(),
        watchers.clone(),
    );
    let services_check = service_handler(&prc.name, &prc.dependencies.services, tx.clone());

    let res = join!(files_check, services_check);
    // if inactive -> spawn checks -> active is true
    // all dependencies healthy and process not running: start it,
    // reporting failure via protocol symbol 3 (fatal)
    if !is_active(&prc.name).await && res.0.is_ok() && res.1.is_ok() {
        if start_process(&prc.name, &prc.path).await.is_err() {
            // NOTE(review): unwrap on send panics if the receiver dropped —
            // presumably run_daemons is still alive here; TODO confirm
            tx.send(3).await.unwrap();
            return;
        }
    }
    // if frozen -> spawn checks -> unfreeze is true
    // dependencies healthy again: request unfreeze via protocol symbol 10
    else if is_frozen(&prc.name).await && res.0.is_ok() && res.1.is_ok() {
        tx.send(10).await.unwrap();
        return;
    }
    // tokio::time::sleep(Duration::from_millis(100)).await;
    tokio::task::yield_now().await;
}
|
||||
|
||||
// todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}'
|
||||
/// # Fn `get_container_id`
|
||||
/// ## for getting container id used in logs
|
||||
///
|
||||
/// *input* : -
|
||||
///
|
||||
/// *output* : Some(String) if cont-id was grubbed | None - if not
|
||||
///
|
||||
/// *initiator* : fn `options::logger::setup_logger`
|
||||
///
|
||||
/// *managing* : -
|
||||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
pub fn get_container_id() -> Option<String> {
|
||||
match Command::new(GET_ID_CMD).output() {
|
||||
Ok(output) => {
|
||||
if !output.status.success() {
|
||||
return None;
|
||||
}
|
||||
let id = String::from_utf8_lossy(&output.stdout).to_string();
|
||||
if id.is_empty() {
|
||||
return None;
|
||||
}
|
||||
Some(String::from_utf8_lossy(&output.stdout).to_string())
|
||||
}
|
||||
Err(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod utils_unittests {
|
||||
use super::get_container_id;
|
||||
#[test]
|
||||
fn check_if_container_id_can_be_grabed() {
|
||||
assert!(get_container_id().is_some());
|
||||
}
|
||||
}
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
use crate::options::structs::{CustomError, Files};
|
||||
use crate::utils::prcs::{is_active, is_frozen};
|
||||
use super::prcs::{is_active, is_frozen};
|
||||
use inotify::{EventMask, Inotify, WatchMask};
|
||||
use std::borrow::BorrowMut;
|
||||
use std::path::Path;
|
||||
|
|
@ -98,7 +98,10 @@ pub async fn file_handler(
|
|||
// * watcher recreation after dealing with file recreation mechanism in text editors
|
||||
let mutex = notify.borrow_mut();
|
||||
|
||||
*mutex = create_watcher(&file.filename, &file.src).await.unwrap();
|
||||
// *mutex = create_watcher(&file.filename, &file.src).await.unwrap();
|
||||
if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
|
||||
*mutex = watcher;
|
||||
}
|
||||
}
|
||||
match file.triggers.on_change.as_str() {
|
||||
"stop" => {
|
||||
|
|
@ -159,22 +162,22 @@ mod files_unittests {
|
|||
use super::*;
|
||||
#[tokio::test]
|
||||
async fn try_to_create_watcher() {
|
||||
let res = create_watcher("dep-file", "/home/user/monitor/runner-rs/tests/examples/").await;
|
||||
let res = create_watcher("dep-file", "./tests/examples/").await;
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
#[tokio::test]
|
||||
async fn try_to_create_invalid_watcher() {
|
||||
let res = create_watcher("invalid-file", "/path/to/the/hell").await;
|
||||
let res = create_watcher("invalid-file", "/path/to/the/no/dir").await;
|
||||
assert!(res.is_err());
|
||||
}
|
||||
#[tokio::test]
|
||||
async fn check_existing_file() {
|
||||
let res = check_file("dep-file", "/home/user/monitor/runner-rs/tests/examples/").await;
|
||||
let res = check_file("dep-file", "./tests/examples/").await;
|
||||
assert!(res.is_ok());
|
||||
}
|
||||
#[tokio::test]
|
||||
async fn check_non_existing_file() {
|
||||
let res = check_file("invalid-file", "/path/to/the/hell").await;
|
||||
let res = check_file("invalid-file", "/path/to/the/no/dir").await;
|
||||
assert!(res.is_err());
|
||||
}
|
||||
}
|
||||
|
|
@ -1,5 +1,11 @@
|
|||
//
|
||||
// module needed to check host-agent health condition and to communicate with it
|
||||
//
|
||||
use tokio::{io::Interest, net::UnixStream};
|
||||
use anyhow::{Ok, Result, Error};
|
||||
// to kill lint bug
|
||||
#[allow(unused_imports)]
|
||||
use tokio::net::UnixListener;
|
||||
|
||||
/// # Fn `open_unix_socket`
|
||||
/// ## opening unix-socket for host-agent communication
|
||||
|
|
@ -14,9 +20,10 @@ use tokio::{io::Interest, net::UnixStream};
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
async fn open_unix_socket() -> Result<UnixStream, std::io::Error> {
|
||||
let socket = UnixStream::connect("/var/run/enode/hostagent.sock").await?;
|
||||
Ok(socket)
|
||||
#[allow(dead_code)]
|
||||
async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error> {
|
||||
// "/var/run/enode/hostagent.sock"
|
||||
UnixStream::connect(sock_path).await
|
||||
}
|
||||
|
||||
/// # Fn `ha_healthcheck`
|
||||
|
|
@ -32,15 +39,11 @@ async fn open_unix_socket() -> Result<UnixStream, std::io::Error> {
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
async fn ha_healthcheck(socket: &UnixStream) -> Result<(), std::io::Error >{
|
||||
#[allow(dead_code)]
|
||||
async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
|
||||
socket.ready(Interest::WRITABLE).await?;
|
||||
if socket.writable().await.is_ok() {
|
||||
if let Err(er) = socket.try_write(b"Hello HAgent") {
|
||||
return Err(er);
|
||||
}
|
||||
} else {
|
||||
return Err(std::io::ErrorKind::WouldBlock.into());
|
||||
}
|
||||
socket.writable().await?;
|
||||
socket.try_write(b"Hello HAgent")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
@ -57,34 +60,37 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), std::io::Error >{
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), std::io::Error > {
|
||||
#[allow(dead_code)]
|
||||
async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error > {
|
||||
socket.ready(Interest::WRITABLE).await?;
|
||||
if socket.writable().await.is_ok() {
|
||||
if let Err(er) = socket.try_write(data.as_bytes()) {
|
||||
return Err(er);
|
||||
}
|
||||
} else {
|
||||
return Err(std::io::ErrorKind::WouldBlock.into());
|
||||
}
|
||||
socket.writable().await?;
|
||||
socket.try_write(data.as_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod hagent_unittets {
|
||||
use super::*;
|
||||
#[tokio::test]
|
||||
// maybe bool : true -> alive, false -> dead
|
||||
// simple request on api
|
||||
async fn hagent_healthcheck() {
|
||||
let sock = open_unix_socket().await;
|
||||
assert!(sock.is_ok());
|
||||
let sock = sock.unwrap();
|
||||
assert!(ha_healthcheck(&sock).await.is_ok());
|
||||
const TEST_SOCKET: &str = "./tests/examples/hagent_test.sock";
|
||||
|
||||
async fn init_listener() -> UnixListener {
|
||||
let _ = std::fs::remove_file(TEST_SOCKET);
|
||||
UnixListener::bind(TEST_SOCKET).unwrap()
|
||||
}
|
||||
// #[tokio::test]
|
||||
// // maybe bool : true -> alive, false -> dead
|
||||
// // simple request on api
|
||||
// async fn hagent_healthcheck() {
|
||||
// let _ = init_listener().await;
|
||||
// let sock = open_unix_socket(TEST_SOCKET).await;
|
||||
// assert!(sock.is_ok());
|
||||
// let sock = sock.unwrap();
|
||||
// assert!(ha_healthcheck(&sock).await.is_ok());
|
||||
// }
|
||||
#[tokio::test]
|
||||
// --Result<maybe Response>
|
||||
// one-shot func
|
||||
async fn send_metrics_to_hagent() {
|
||||
async fn hagent_communication_test() {
|
||||
use crate::options::structs::{ProcessMetrics, ContainerMetrics, Metrics};
|
||||
|
||||
let procm = ProcessMetrics::new("test-prc", 15.0, 5.0);
|
||||
|
|
@ -92,7 +98,9 @@ mod hagent_unittets {
|
|||
let metrics = Metrics::new(contm, vec![procm]);
|
||||
let metrics = &serde_json::to_string_pretty(&metrics).unwrap();
|
||||
|
||||
let sock = open_unix_socket().await;
|
||||
#[allow(unused_mut)]
|
||||
let mut _list = init_listener().await;
|
||||
let sock = open_unix_socket(TEST_SOCKET).await;
|
||||
assert!(sock.is_ok());
|
||||
let sock = sock.unwrap();
|
||||
assert!(ha_healthcheck(&sock).await.is_ok());
|
||||
|
|
@ -101,6 +109,6 @@ mod hagent_unittets {
|
|||
}
|
||||
#[tokio::test]
|
||||
async fn open_unixsocket_test() {
|
||||
assert!(open_unix_socket().await.is_ok());
|
||||
assert!(open_unix_socket("non/valid/socket/file.sock").await.is_err());
|
||||
}
|
||||
}
|
||||
|
|
@ -7,7 +7,7 @@ use crate::options::structs::TrackingProcess;
|
|||
use sysinfo::{Process, System};
|
||||
use tokio::join;
|
||||
use crate::options::structs::{ProcessMetrics, ContainerMetrics};
|
||||
use crate::utils::get_container_id;
|
||||
use super::get_container_id;
|
||||
// use pcap::{Device, Capture, Active};
|
||||
// use std::net::Ipv4Addr;
|
||||
// use anyhow::{Result, Ok};
|
||||
|
|
@ -27,6 +27,7 @@ use crate::utils::get_container_id;
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
#[allow(dead_code)]
|
||||
pub async fn init_metrics_grubber() {
|
||||
let mut system = System::new();
|
||||
// let mut buffer: Vec<PacketInfo> = vec![];
|
||||
|
|
@ -39,6 +40,8 @@ pub async fn init_metrics_grubber() {
|
|||
// let _ = capture_packets(shared_buf.clone()).await;
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[allow(unused_variables)]
|
||||
async fn gather_metrics(proc: Arc<Process>) {
|
||||
|
||||
}
|
||||
|
|
@ -92,6 +95,7 @@ async fn gather_metrics(proc: Arc<Process>) {
|
|||
///
|
||||
/// *depends on* : `TrackingProcess`
|
||||
///
|
||||
#[allow(dead_code)]
|
||||
async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProcess>>) -> ContainerMetrics {
|
||||
let metrics = join!(
|
||||
get_cpu_metrics_container(sys.clone()),
|
||||
|
|
@ -119,6 +123,7 @@ async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProce
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
#[allow(dead_code)]
|
||||
async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 {
|
||||
sys.global_cpu_usage()
|
||||
}
|
||||
|
|
@ -136,6 +141,7 @@ async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 {
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
#[allow(dead_code)]
|
||||
async fn get_ram_metrics_container(sys: Arc<System>) -> f32 {
|
||||
(sys.used_memory() / sys.total_memory()) as f32 * 100.0
|
||||
}
|
||||
|
|
@ -156,6 +162,7 @@ async fn get_ram_metrics_container(sys: Arc<System>) -> f32 {
|
|||
///
|
||||
/// *depends on* : `TrackingProcess`
|
||||
///
|
||||
#[allow(dead_code)]
|
||||
async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> {
|
||||
prcs.iter().map(|process| process.name.clone()).collect()
|
||||
}
|
||||
|
|
@ -173,6 +180,7 @@ async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> {
|
|||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
#[allow(dead_code)]
|
||||
async fn get_all_metrics_process(proc: Arc<Process>, sys: Arc<System>) -> ProcessMetrics {
|
||||
let metrics = join!(
|
||||
get_cpu_metrics_process(proc.clone()),
|
||||
|
|
@ -233,14 +233,15 @@ mod process_unittests {
|
|||
// rewrite, its a pipe
|
||||
#[tokio::test]
|
||||
async fn full_cycle_with_restart() {
|
||||
let res1 = start_process("temp-process", "/home/user/monitor/runner-rs/temp-process").await;
|
||||
// let _ = std::io::stdout().write_all(b"");
|
||||
let res1 = start_process("restart-prc", "./tests/examples/restart-prc").await;
|
||||
assert!(res1.is_ok());
|
||||
let res2 =
|
||||
restart_process("temp-process", "/home/user/monitor/runner-rs/temp-process").await;
|
||||
restart_process("restart-prc", "./tests/examples/restart-prc").await;
|
||||
assert!(res2.is_ok());
|
||||
let _ = terminate_process("temp-process").await;
|
||||
let res3 = is_active("temp-process").await;
|
||||
assert!(res3);
|
||||
let _ = terminate_process("restart-prc").await;
|
||||
let res3 = is_active("restart-prc").await;
|
||||
assert!(!res3);
|
||||
}
|
||||
// rewrite, its a pipe
|
||||
#[tokio::test]
|
||||
|
|
@ -249,7 +250,10 @@ mod process_unittests {
|
|||
}
|
||||
#[tokio::test]
|
||||
async fn is_active_check() {
|
||||
assert!(is_active("systemd").await);
|
||||
let res1 = start_process("tmp-prc", "./tests/examples/tmp-prc").await;
|
||||
assert!(res1.is_ok());
|
||||
assert!(is_active("tmp-prc").await);
|
||||
let _ = terminate_process("tmp-prc").await;
|
||||
}
|
||||
#[tokio::test]
|
||||
async fn isnt_active_check() {
|
||||
|
|
@ -257,11 +261,17 @@ mod process_unittests {
|
|||
}
|
||||
#[tokio::test]
|
||||
async fn is_frozen_check() {
|
||||
assert!(!is_frozen("systemd").await);
|
||||
let res1 = start_process("freeze-check", "./tests/examples/freeze-check").await;
|
||||
assert!(res1.is_ok());
|
||||
assert!(!is_frozen("freeze-check").await);
|
||||
}
|
||||
#[tokio::test]
|
||||
async fn pidof_active_process() {
|
||||
assert!(get_pid("systemd").await.is_some());
|
||||
assert!(get_pid("pidof-prc").await.is_none());
|
||||
let res1 = start_process("pidof-prc", "./tests/examples/pidof-prc").await;
|
||||
assert!(res1.is_ok());
|
||||
assert!(get_pid("pidof-prc").await.is_some());
|
||||
let _ = terminate_process("pidof-prc").await;
|
||||
}
|
||||
|
||||
// broken mechanism need to check
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
use crate::options::structs::{CustomError, Services};
|
||||
use crate::utils::prcs::{is_active, is_frozen};
|
||||
use super::prcs::{is_active, is_frozen};
|
||||
use log::{error, warn};
|
||||
use std::net::{TcpStream, ToSocketAddrs};
|
||||
use std::sync::Arc;
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
265
src/utils.rs
265
src/utils.rs
|
|
@ -1,265 +0,0 @@
|
|||
pub mod files;
|
||||
pub mod hagent;
|
||||
pub mod metrics;
|
||||
pub mod prcs;
|
||||
pub mod services;
|
||||
|
||||
//
|
||||
|
||||
use crate::options::structs::TrackingProcess;
|
||||
use files::create_watcher;
|
||||
use files::file_handler;
|
||||
use inotify::Inotify;
|
||||
use log::{error, warn};
|
||||
use prcs::{
|
||||
freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process,
|
||||
unfreeze_process,
|
||||
};
|
||||
use services::service_handler;
|
||||
use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
use tokio::join;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::Duration;
|
||||
|
||||
const GET_ID_CMD: &str = "hostname";
|
||||
|
||||
/// # Fn `run_daemons`
|
||||
/// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to given messages into channel
|
||||
///
|
||||
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `&mut mpsc::Receiver<u8>`,
|
||||
///
|
||||
/// *output* : ()
|
||||
///
|
||||
/// *initiator* : main thread
|
||||
///
|
||||
/// *managing* : Arc to current process struct, Arc to managing channel writer, mut ref to managing channel reader
|
||||
///
|
||||
/// *depends on* : all module `prcs`'s functions, fn `running_handler`, fn `utils::files::create_watcher`
|
||||
///
|
||||
/// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process
|
||||
///
|
||||
pub async fn run_daemons(
|
||||
proc: Arc<TrackingProcess>,
|
||||
tx: Arc<mpsc::Sender<u8>>,
|
||||
rx: &mut mpsc::Receiver<u8>,
|
||||
) {
|
||||
// creating watchers + ---buffers---
|
||||
let mut watchers: Vec<Inotify> = vec![];
|
||||
for file in proc.dependencies.files.clone().into_iter() {
|
||||
if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
|
||||
watchers.push(watcher);
|
||||
} else {
|
||||
let _ = tx.send(121).await;
|
||||
}
|
||||
// watchers.push(create_watcher(&file.filename, &file.src).await.unwrap());
|
||||
}
|
||||
let watchers_clone: Arc<tokio::sync::Mutex<Vec<Inotify>>> =
|
||||
Arc::new(tokio::sync::Mutex::new(watchers));
|
||||
|
||||
loop {
|
||||
let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone());
|
||||
tokio::select! {
|
||||
_ = run_hand => {},
|
||||
_val = rx.recv() => {
|
||||
match _val.unwrap() {
|
||||
// 1 - File-dependency handling error -> terminating (after waiting)
|
||||
1 => {
|
||||
if is_active(&proc.name).await {
|
||||
error!("File-dependency handling error: Terminating {} process ..." , &proc.name);
|
||||
terminate_process(&proc.name).await;
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
return;
|
||||
},
|
||||
// 2 - File-dependency handling error -> holding (after waiting)
|
||||
2 => {
|
||||
if !is_frozen(&proc.name).await {
|
||||
error!("File-dependency handling error: Freezing {} process ..." , &proc.name);
|
||||
freeze_process(&proc.name).await;
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
},
|
||||
// 3 - Running process error
|
||||
3 => {
|
||||
error!("Error due to starting {} process", &proc.name);
|
||||
break;
|
||||
},
|
||||
// 4 - Timeout of waiting service-dependency -> staying (after waiting)
|
||||
4 => {
|
||||
// warn!("Timeout of waiting service-dependency: Ignoring on {} process ..." , &proc.name);
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
},
|
||||
// 5 - Timeout of waiting service-dependency -> terminating (after waiting)
|
||||
5 => {
|
||||
if is_active(&proc.name).await {
|
||||
error!("Timeout of waiting service-dependency: Terminating {} process ..." , &proc.name);
|
||||
terminate_process(&proc.name).await;
|
||||
tokio::time::sleep(Duration::from_millis(1000)).await;
|
||||
}
|
||||
},
|
||||
// 6 - Timeout of waiting service-dependency -> holding (after waiting)
|
||||
6 => {
|
||||
// println!("holding {}-{}", proc.name, is_active(&proc.name).await);
|
||||
if !is_frozen(&proc.name).await {
|
||||
error!("Timeout of waiting service-dependency: Freezing {} process ..." , &proc.name);
|
||||
freeze_process(&proc.name).await;
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
},
|
||||
// // 7 - File-dependency change -> terminating (after check)
|
||||
7 => {
|
||||
error!("File-dependency warning (file changed). Terminating {} process...", &proc.name);
|
||||
terminate_process(&proc.name).await;
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
return;
|
||||
},
|
||||
// // 8 - File-dependency change -> restarting (after check)
|
||||
8 => {
|
||||
warn!("File-dependency warning (file changed). Restarting {} process...", &proc.name);
|
||||
let _ = restart_process(&proc.name, &proc.path).await;
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
},
|
||||
// // 9 - File-dependency change -> staying (after check)
|
||||
9 => {
|
||||
// no need to trash logs
|
||||
warn!("File-dependency warning (file changed). Ignoring event on {} process...", &proc.name);
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
},
|
||||
|
||||
// 10 - Process unfreaze call via file handler (or service handler)
|
||||
10 | 11 => {
|
||||
if is_frozen(&proc.name).await {
|
||||
warn!("Unfreezing process {} call...", &proc.name);
|
||||
unfreeze_process(&proc.name).await;
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
},
|
||||
// 11 - Process unfreaze call via service handler
|
||||
// 11 => {
|
||||
// if is_frozen(&proc.name).await {
|
||||
// warn!("Unfreezing process {} call...", &proc.name);
|
||||
// unfreeze_process(&proc.name).await;
|
||||
// }
|
||||
// tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
// },
|
||||
// 101 - Impermissible trigger values in JSON
|
||||
101 => {
|
||||
error!("Impermissible trigger values in JSON in {}'s block. Killing thread...", proc.name);
|
||||
if is_active(&proc.name).await {
|
||||
terminate_process(&proc.name).await;
|
||||
}
|
||||
break;
|
||||
},
|
||||
//
|
||||
// 121 - Cannot create valid watcher for file dependency
|
||||
121 => {
|
||||
error!("Cannot create valid watcher for {}'s file dependency. Terminating thread...", proc.name);
|
||||
let _ = terminate_process("runner-rs").await;
|
||||
break;
|
||||
},
|
||||
// 111 - global thread termination with killing current child in a face
|
||||
// of a current process
|
||||
111 => {
|
||||
warn!("Terminating {}'s child processes...", &proc.name);
|
||||
match is_active(&proc.name).await {
|
||||
true => {
|
||||
terminate_process(&proc.name).await;
|
||||
},
|
||||
false => {
|
||||
log::info!("Process {} is already terminated!", proc.name);
|
||||
},
|
||||
}
|
||||
break;
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
},
|
||||
}
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
// check process status daemon
|
||||
/// # Fn `run_daemons`
|
||||
/// ## func to async exec subjobs of checking process, services and files states
|
||||
///
|
||||
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `Arc<tokio::sync::Mutex<Vec<Inotify>>>`
|
||||
///
|
||||
/// *output* : ()
|
||||
///
|
||||
/// *initiator* : fn `run_daemons`
|
||||
///
|
||||
/// *managing* : Arc to current process struct, Arc to Mutex to list of file watchers
|
||||
///
|
||||
/// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}`
|
||||
///
|
||||
pub async fn running_handler(
|
||||
prc: Arc<TrackingProcess>,
|
||||
tx: Arc<mpsc::Sender<u8>>,
|
||||
watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>>,
|
||||
) {
|
||||
// services and files check (once)
|
||||
let files_check = file_handler(
|
||||
&prc.name,
|
||||
&prc.dependencies.files,
|
||||
tx.clone(),
|
||||
watchers.clone(),
|
||||
);
|
||||
let services_check = service_handler(&prc.name, &prc.dependencies.services, tx.clone());
|
||||
|
||||
let res = join!(files_check, services_check);
|
||||
// if inactive -> spawn checks -> active is true
|
||||
if !is_active(&prc.name).await && res.0.is_ok() && res.1.is_ok() {
|
||||
if start_process(&prc.name, &prc.path).await.is_err() {
|
||||
tx.send(3).await.unwrap();
|
||||
return;
|
||||
}
|
||||
}
|
||||
// if frozen -> spawn checks -> unfreeze is true
|
||||
else if is_frozen(&prc.name).await && res.0.is_ok() && res.1.is_ok() {
|
||||
tx.send(10).await.unwrap();
|
||||
return;
|
||||
}
|
||||
// tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
|
||||
// todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}'
|
||||
/// # Fn `get_container_id`
|
||||
/// ## for getting container id used in logs
|
||||
///
|
||||
/// *input* : -
|
||||
///
|
||||
/// *output* : Some(String) if cont-id was grubbed | None - if not
|
||||
///
|
||||
/// *initiator* : fn `options::logger::setup_logger`
|
||||
///
|
||||
/// *managing* : -
|
||||
///
|
||||
/// *depends on* : -
|
||||
///
|
||||
pub fn get_container_id() -> Option<String> {
|
||||
match Command::new(GET_ID_CMD).output() {
|
||||
Ok(output) => {
|
||||
if !output.status.success() {
|
||||
return None;
|
||||
}
|
||||
let id = String::from_utf8_lossy(&output.stdout).to_string();
|
||||
if id.is_empty() {
|
||||
return None;
|
||||
}
|
||||
Some(String::from_utf8_lossy(&output.stdout).to_string())
|
||||
}
|
||||
Err(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod utils_unittests {
|
||||
use super::get_container_id;
|
||||
#[test]
|
||||
fn check_if_container_id_can_be_grabed() {
|
||||
assert!(get_container_id().is_some());
|
||||
}
|
||||
}
|
||||
BIN
temp-process
BIN
temp-process
Binary file not shown.
Loading…
Reference in New Issue