Compare commits

...

99 Commits

Author SHA1 Message Date
YurijO f6e440bc62 Merge pull request 'rc' (#37) from rc into master
Reviewed-on: http://192.168.2.61/VladislavD/runner-rs/pulls/37
2025-01-23 12:52:15 +03:00
yuobrezkov b425b17d25 Disabled interpolation in Jenkinsfile for last stage
test-org/runner-rs/pipeline/pr-master There was a failure building this commit Details
2025-01-23 12:36:22 +03:00
yuobrezkov 3d179713a8 Changed logic post deleting
test-org/runner-rs/pipeline/pr-master There was a failure building this commit Details
2025-01-23 12:28:16 +03:00
yuobrezkov abafa99e0e Changed prod creds
test-org/runner-rs/pipeline/pr-master There was a failure building this commit Details
2025-01-23 12:17:42 +03:00
yuobrezkov 35dbce562e Changed Gitea creds
test-org/runner-rs/pipeline/pr-master There was a failure building this commit Details
2025-01-23 11:54:41 +03:00
yuobrezkov fb3db62728 Changed post logic of pipeline 2025-01-23 11:36:57 +03:00
yuobrezkov c23c437abb Changed version getting3
test-org/runner-rs/pipeline/pr-master There was a failure building this commit Details
2025-01-22 18:39:02 +03:00
yuobrezkov 8174affa51 Changed version getting2
test-org/runner-rs/pipeline/pr-master There was a failure building this commit Details
2025-01-22 18:33:16 +03:00
yuobrezkov 504bc7df84 Changed version getting
test-org/runner-rs/pipeline/pr-master There was a failure building this commit Details
2025-01-22 18:27:06 +03:00
YurijO 57195a71ae Merge pull request 'cicd check' (#29) from feature/800 into rc
test-org/runner-rs/pipeline/pr-master There was a failure building this commit Details
2025-01-22 18:12:04 +03:00
prplV a9351a8cb2 cicd check
test-org/runner-rs/pipeline/pr-rc Build started... Details
2025-01-22 18:08:10 +03:00
YurijO 85b9c3175c Merge pull request 'Changed Jenkinsfile for deb packages' (#28) from feature/jenkinsfile-fix into rc
Reviewed-on: http://192.168.2.61/VladislavD/runner-rs/pulls/28
2025-01-22 18:05:43 +03:00
yuobrezkov 780a3c1a37 Changed Jenkinsfile for deb packages
test-org/runner-rs/pipeline/pr-rc This commit looks good Details
2025-01-22 17:58:35 +03:00
YurijO e5e75a417f Merge pull request 'Changed Ubuntu version' (#25) from bugfix/glib-fix into rc 2025-01-21 14:26:49 +03:00
yuobrezkov 512df7ebee Changed Ubuntu version
test-org/runner-rs/pipeline/pr-rc Build started... Details
2025-01-21 14:25:26 +03:00
YurijO 8cd4a6dad7 Merge pull request 'Added build.sh with supporting amd64 riscv64 binaries' (#24) from feature/724 into rc 2025-01-21 14:07:48 +03:00
yuobrezkov f08b54d51e Added build.sh with supporting amd64 riscv64 binaries
test-org/runner-rs/pipeline/pr-rc Build started... Details
2025-01-21 14:02:55 +03:00
YurijO d6de6948f8 Merge pull request 'cli: self made error type with thiserror crate' (#23) from feature/thiserror into rc 2025-01-20 15:58:03 +03:00
prplV 5cfae2c246 cli: self made error type with thiserror crate
test-org/runner-rs/pipeline/pr-rc Build started... Details
2025-01-20 15:51:49 +03:00
prplV 0b61f56bca work without valid local conf checked 2025-01-20 12:00:59 +03:00
YurijO ebff93698c Merge pull request 'feature/envvars' (#21) from feature/envvars into rc 2025-01-20 11:01:18 +03:00
prplV d400318aad version update
test-org/runner-rs/pipeline/pr-rc Build started... Details
2025-01-15 16:31:16 +03:00
prplV b67feb8ce5 dotenv support + gitignore update 2025-01-15 16:30:17 +03:00
prplV 0112066418 setting up env vars + comaping with preboot values 2025-01-15 16:25:53 +03:00
prplV eed9fa881a default impl self-written + config preboot param check fixed 2025-01-15 13:17:14 +03:00
prplV 5d54c5b97c env vars enum + display-default impl 2025-01-15 12:51:22 +03:00
YurijO cc8c7f19bf Merge pull request 'init commit' (#20) from feature/test into rc 2025-01-13 16:59:26 +03:00
yuobrezkov 390f496004 Modified Dockerfile
test-org/runner-rs/pipeline/pr-rc Build started... Details
2025-01-13 16:56:48 +03:00
yuobrezkov f6c952f208 changed creating folders for binaries
test-org/runner-rs/pipeline/pr-rc There was a failure building this commit Details
2025-01-13 16:30:06 +03:00
yuobrezkov 497dcbaeb6 Added / for target folders
test-org/runner-rs/pipeline/pr-rc There was a failure building this commit Details
2025-01-13 16:25:04 +03:00
yuobrezkov 0e959b9a58 Changed Jenkinsfile
test-org/runner-rs/pipeline/pr-rc There was a failure building this commit Details
2025-01-13 15:47:44 +03:00
prplV d229ae1ce9 cicd fix#2
test-org/runner-rs/pipeline/pr-rc There was a failure building this commit Details
2025-01-13 15:31:51 +03:00
prplV 576a5e0739 cicd fix with dep installing
test-org/runner-rs/pipeline/pr-rc There was a failure building this commit Details
2025-01-13 15:25:10 +03:00
prplV a25a630d77 test fix not to test bash code )
test-org/runner-rs/pipeline/pr-rc There was a failure building this commit Details
2025-01-13 15:17:32 +03:00
prplV e7e7eb99d8 init commit
test-org/runner-rs/pipeline/pr-rc There was a failure building this commit Details
2025-01-13 15:09:53 +03:00
yuobrezkov 5486b6d584 Added Jenkinsfile 2025-01-13 13:29:10 +03:00
yuobrezkov e59b3f9d06 Added Dockerfile 2025-01-13 13:24:23 +03:00
Vladislav Drozdov aaa3459920 Merge pull request 'dev' (#19) from dev into master
Reviewed-on: http://git.enode/VladislavD/runner-rs/pulls/19
2025-01-13 13:20:57 +03:00
prplV 204d284871 deleted useless Dockerfile 2025-01-13 13:19:34 +03:00
prplV 7e0d22d4e0 fixed issue#18 2025-01-13 13:18:20 +03:00
prplV 3d12137052 useless log deleted 2025-01-13 12:52:36 +03:00
prplV 4d2fe57680 cli and daemon "PING-PONG" communication 2025-01-09 16:36:33 +03:00
prplV 014e8dd56d noxis cli protocol now developed 2025-01-09 15:10:34 +03:00
prplV 8b123cd593 import refactor 2024-12-27 13:47:54 +03:00
prplV ce067efffd ISSUE#16 2024-12-23 17:35:23 +03:00
prplV 426e287866 fix#2 2024-12-23 15:02:51 +03:00
prplV c03981186a preboot doc-comms fix 2024-12-23 14:34:47 +03:00
prplV 939bdc6676 added preboot doc-comms 2024-12-23 14:07:22 +03:00
prplV 97bc91ffcd added cli_pipeline doc-comms 2024-12-23 09:44:53 +03:00
prplV 7be46d4373 no warn in preboot.rs and config.rs + no dbg 2024-12-18 11:02:49 +03:00
prplV 1578216712 [ISSUE#12 "preload errors without returning !!!!"] preload params validation mech fix 2024-12-18 10:55:13 +03:00
prplV 5a1588e256 config_path unwrapping mechanism 2024-12-18 10:32:11 +03:00
Vladislav Drozdov 4fb3533074 Merge pull request 'master' (#15) from master into dev
Reviewed-on: http://git.enode/VladislavD/runner-rs/pulls/15
2024-12-18 10:22:17 +03:00
Vladislav Drozdov 7fa9d02343 Merge pull request 'feature/preload-cli-adj' (#14) from feature/preload-cli-adj into master
Reviewed-on: http://git.enode/VladislavD/runner-rs/pulls/14
2024-12-18 10:20:57 +03:00
prplV 56a20eb65c preboot fix work with config params + refactor 2024-12-16 11:44:34 +03:00
prplV 2dbfb4a93a preboot fix work with params 2024-12-16 11:34:09 +03:00
prplV 77a1e24a47 break added 2024-12-12 17:55:41 +03:00
prplV b51a3fb0f0 submodules adj 2024-12-12 17:04:20 +03:00
prplV a75160c3e2 cli communication added 2024-12-12 17:03:57 +03:00
prplV 6e86dcbf09 config adj fix 2024-12-11 13:42:11 +03:00
prplV da3d8cd129 config preboot adj 2024-12-11 13:38:29 +03:00
prplV e7817a97b6 () -> anyhow::result<()> 2024-12-11 13:11:15 +03:00
prplV 2d225f4c09 versions fix 2024-12-10 18:50:30 +03:00
prplV 56769f54b9 new listener func 2024-12-10 18:23:17 +03:00
prplV 6469130662 added lib to export structs + func to init listener in noxis-rs 2024-12-10 18:23:02 +03:00
prplV b6ecb10a77 utils rework 2024-12-10 18:22:16 +03:00
prplV 1c6729daab import serde + little fixes 2024-12-10 14:10:04 +03:00
prplV 5bba6e8b82 added with-flags param to start/restart cmd 2024-12-10 13:48:56 +03:00
prplV a20fcf58f2 fix info output 2024-12-10 13:36:27 +03:00
prplV 537e853706 fixed test 2024-12-06 14:52:43 +03:00
Vladislav Drozdov fb0568bee6 Merge pull request 'preboot to master' (#11) from preboot into master
Reviewed-on: http://git.enode/VladislavD/runner-rs/pulls/11
2024-12-06 11:26:05 +03:00
prplV 93917800f0 README update 2024-12-06 11:20:18 +03:00
prplV 9d14658fd0 cli added 2024-12-04 16:13:22 +03:00
prplV 1b12ecc67f critical bug in files fixed + cli created 2024-12-04 12:55:26 +03:00
prplV 5a9bf795e9 gr v2 2024-12-04 11:23:42 +03:00
prplV d67e77c5cc global refactor to work as workspace 2024-12-04 11:23:18 +03:00
Vladislav Drozdov f3e9cb92df Merge pull request 'preboot' (#10) from preboot into master
Reviewed-on: http://192.168.2.61/VladislavD/runner-rs/pulls/10
2024-12-03 18:20:34 +03:00
prplV 28f0eb53f6 hagent tests finally fixed 2024-12-03 18:16:29 +03:00
prplV af12a1fef1 + validate func 2024-12-03 17:23:48 +03:00
prplV dbb49de09c preboot unitetst + refactor 2024-12-03 15:25:24 +03:00
prplV 74e3b3ab31 no warnings 2024-12-03 13:11:19 +03:00
prplV 8a47ffe851 prcs refactor 2024-12-03 13:09:59 +03:00
prplV 345abde741 no metrics warnings 2024-12-03 13:09:02 +03:00
prplV ba395543e4 no warnings 2024-12-03 13:06:17 +03:00
prplV 591a1df5d6 preboot settings added 2024-12-03 12:53:19 +03:00
prplV bda4762713 delete sock file 2024-12-03 10:43:19 +03:00
prplV 56caa68135 no tracking sock file 2024-12-03 10:42:46 +03:00
prplV d3764448aa hagent unittests fixed 2024-12-02 14:04:09 +03:00
prplV 3a51ea1418 prc unit-tests fix 2024-12-02 11:42:17 +03:00
prplV 363d38bdb6 pidof unitest fix 2024-11-29 16:02:44 +03:00
prplV 9b9b8d0b13 unittests fix for Docker 2024-11-28 18:30:12 +03:00
prplV 627a132e53 with no output now 2024-11-28 14:29:28 +03:00
prplV cc18b1cfe6 locl deleted 2024-11-26 17:55:34 +03:00
prplV 1884469c9e prcs unittests fix --no-abs-path 2024-11-26 17:46:03 +03:00
prplV 406b199b09 exec files needed in tests added 2024-11-26 17:42:52 +03:00
prplV 150bb87a41 files unit tests fix 2024-11-26 17:40:11 +03:00
prplV f4217cf8ad doctests fix 2024-11-26 17:11:06 +03:00
YurijO b2ce9acf50 Обновить settings.json 2024-11-26 15:41:42 +03:00
Vladislav Drozdov 36a81c9bf2 Merge pull request 'master to dev' (#9) from master into dev
Reviewed-on: http://192.168.2.61/VladislavD/runner-rs/pulls/9
2024-11-19 12:10:32 +03:00
45 changed files with 1548 additions and 1501 deletions

5
.gitignore vendored
View File

@ -1,5 +1,6 @@
/target /target
.idea .idea
Dockerfile /.env
Cargo.lock Cargo.lock
settings.json hagent_test.sock
release

1089
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,20 +1,12 @@
[package] [workspace]
name = "runner-rs" resolver = "2"
version = "0.9.25" members = [
edition = "2021" "noxis-rs",
"noxis-cli",
]
[profile.dev] [profile.dev]
debug = true debug = true
[dependencies] [profile.test]
anyhow = "1.0.93" debug = false
chrono = "0.4.38"
env_logger = "0.11.3"
inotify = "0.10.2"
log = "0.4.22"
pcap = "2.2.0"
redis = "0.25.4"
serde = { version = "1.0.203", features = ["derive"] }
serde_json = "1.0.118"
sysinfo = "0.32.0"
tokio = { version = "1.38.0", features = ["full", "time"] }

View File

@ -1,25 +1,31 @@
FROM ubuntu FROM ubuntu:22.04
RUN mkdir -p /usr/src/kii/ USER root
RUN apt update && apt install -y \
curl \
build-essential \
libssl-dev \
pkg-config \
libudev-dev \
procps \
gcc-riscv64-unknown-elf \
gcc-riscv64-linux-gnu \
binutils-riscv64-linux-gnu \
&& rm -rf /var/lib/apt/lists/*
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
WORKDIR /usr/src/kii/ WORKDIR /usr/src/kii/
RUN mkdir monitor/ COPY . ./
RUN mkdir -p services/temp-process/
RUN touch services/temp-process/dep.txt
RUN touch services/temp-process/run.sh
RUN echo "./services/temp-process/temp-process &>/dev/null" >> services/temp-process/run.sh
COPY target/x86_64-unknown-linux-gnu/release/runner-rs monitor/ RUN chmod +x noxis-rs/temp-process
COPY settings.json .
COPY temp-process services/temp-process/
RUN chmod +x services/temp-process/temp-process RUN rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu
RUN chmod +x services/temp-process/run.sh
RUN chmod +x monitor/runner-rs
# some troubles with execution this row-cmd RUN cargo unibuild
# ?: cannot get while initializing container
RUN export ENODE_CID=$(cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' "{print \$6}")
ENTRYPOINT [ "/usr/src/kii/monitor/runner-rs" ] ENTRYPOINT ["cargo", "test"]

243
Jenkinsfile vendored Normal file
View File

@ -0,0 +1,243 @@
// Declarative CI pipeline for runner-rs / noxis:
//  - feature/* and rc branches: build + test inside Docker, extract binaries
//  - rc branches: size calculation, .deb packaging, upload to apt repository
//  - feature/* branches: transfer raw binaries to the IFT machine
pipeline {
    agent any
    stages {
        stage('Tests and compiling binaries') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('feature/') || env.CHANGE_BRANCH?.startsWith('rc') }
            }
            steps {
                script {
                    echo "Building and running tests in Docker for feature branch..."
                    try {
                        def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
                        def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
                        sh "mkdir -p ${targetDirAmd}"
                        sh "mkdir -p ${targetDirRisc}"
                        // The image's ENTRYPOINT runs `cargo test`; a non-zero exit
                        // code fails this sh step and lands in the catch below.
                        sh """
docker build --network=host -t e-monitor .
docker run --name e-monitor --dns 8.8.8.8 --network=host e-monitor:latest
"""
                        sh "cp noxis-rs/settings.json ${targetDirAmd}"
                        sh "cp noxis-rs/settings.json ${targetDirRisc}"
                        sh "docker cp e-monitor:/usr/src/kii/target/x86_64-unknown-linux-gnu/release/noxis-cli ${targetDirAmd}"
                        sh "docker cp e-monitor:/usr/src/kii/target/x86_64-unknown-linux-gnu/release/noxis-rs ${targetDirAmd}"
                        sh "docker cp e-monitor:/usr/src/kii/target/riscv64gc-unknown-linux-gnu/release/noxis-cli ${targetDirRisc}"
                        sh "docker cp e-monitor:/usr/src/kii/target/riscv64gc-unknown-linux-gnu/release/noxis-rs ${targetDirRisc}"
                        echo "Tests passed successfully and binaries were extracted!"
                    } catch (Exception e) {
                        echo "Tests failed during Docker run."
                        error "Build failed at 'CI for feature' stage."
                    }
                }
            }
        }
        stage('Calculate Install Size') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('rc') }
            }
            steps {
                script {
                    echo "Calculating installation size for rc branch..."
                    def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
                    def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
                    // Sizes in kB, as required by the Installed-Size control field.
                    def installSizeAmd = sh(script: "du -s --block-size=1024 ${targetDirAmd} | awk '{print \$1}'", returnStdout: true).trim()
                    def installSizeRisc = sh(script: "du -s --block-size=1024 ${targetDirRisc} | awk '{print \$1}'", returnStdout: true).trim()
                    env.INSTALL_SIZE_AMD = installSizeAmd
                    env.INSTALL_SIZE_RISC = installSizeRisc
                    echo "Installation size for amd64: ${env.INSTALL_SIZE_AMD} kB"
                    echo "Installation size for riscv64: ${env.INSTALL_SIZE_RISC} kB"
                }
            }
        }
        stage('Create Deb Packages') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('rc') }
            }
            steps {
                script {
                    echo "Creating deb packages for rc branch..."
                    def targetDirAmd = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/amd64"
                    def targetDirRisc = "${env.WORKSPACE}/${env.CHANGE_BRANCH}/riscv64"
                    def packageName = "noxis"
                    // Version comes from the latest git tag; requires at least one tag.
                    def version = sh(script: "git describe --tags --abbrev=0", returnStdout: true).trim()
                    // binDir: where extracted binaries live; targetDir: package staging root.
                    // NOTE: heredoc bodies below must stay at column 0 — indenting
                    // them would corrupt the control file and the EOF terminators.
                    def createDebPackage = { arch, binDir, targetDir, installSize ->
                        echo "Creating deb package for ${arch}..."
                        sh """
mkdir -p ${targetDir}/package/DEBIAN
mkdir -p ${targetDir}/package/usr/local/enode/${packageName}
mkdir -p ${targetDir}/package/usr/bin
mkdir -p ${targetDir}/package/etc/enode
mkdir -p ${targetDir}/package/lib/systemd/system
cp ${binDir}/noxis-cli ${targetDir}/package/usr/local/enode/${packageName}/
cp ${binDir}/noxis-rs ${targetDir}/package/usr/local/enode/${packageName}/
cp ${binDir}/settings.json ${targetDir}/package/etc/enode/
cat > ${targetDir}/package/DEBIAN/control <<EOF
Package: ${packageName}
Version: ${version}
Section: unknown
Priority: optional
Architecture: ${arch}
Maintainer: kis <supervisor@rosatom.ru>
Description: Noxis Agent Linux
Installed-Size: ${installSize}
EOF
chmod +x ${targetDir}/package/usr/local/enode/${packageName}/noxis-cli
chmod +x ${targetDir}/package/usr/local/enode/${packageName}/noxis-rs
cat > ${targetDir}/package/DEBIAN/postinst <<EOF
#!/bin/bash
ln -sf "/usr/local/enode/${packageName}/noxis-cli" "/usr/bin/noxis-cli"
ln -sf "/usr/local/enode/${packageName}/noxis-rs" "/usr/bin/noxis-rs"
systemctl daemon-reload
systemctl start ${packageName}.service
EOF
chmod +x ${targetDir}/package/DEBIAN/postinst
cat > ${targetDir}/package/lib/systemd/system/${packageName}.service <<EOF
[Unit]
Description=Noxis Service
After=network.target
[Service]
ExecStart=/usr/local/enode/${packageName}/noxis-rs
Restart=always
[Install]
WantedBy=multi-user.target
EOF
dpkg-deb --build ${targetDir}/package ${targetDir}/rc/${arch}/${packageName}_${version}_${arch}.deb
echo "${packageName}_${version}_${arch}.deb created successfully!"
"""
                    }
                    createDebPackage("amd64", targetDirAmd, env.WORKSPACE, env.INSTALL_SIZE_AMD)
                    createDebPackage("riscv64", targetDirRisc, env.WORKSPACE, env.INSTALL_SIZE_RISC)
                    env.DEB_PATH_AMD64 = "${env.WORKSPACE}/rc/amd64/${packageName}_${version}_amd64.deb"
                    env.DEB_PATH_RISCV64 = "${env.WORKSPACE}/rc/riscv64/${packageName}_${version}_riscv64.deb"
                }
            }
        }
        stage('Transfer Binaries') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('feature/') }
            }
            steps {
                script {
                    echo "Transferring binaries packages to remote machine..."
                    withCredentials([usernamePassword(credentialsId: 'ift', passwordVariable: 'SSH_PASS', usernameVariable: 'SSH_USER')]) {
                        def targetDir = "${env.WORKSPACE}/${env.CHANGE_BRANCH}"
                        def remote = [:]
                        remote.name = "remote-server"
                        remote.host = "192.168.2.33"
                        remote.user = SSH_USER
                        remote.password = SSH_PASS
                        remote.allowAnyHosts = true
                        sshPut remote: remote, from: "${targetDir}", into: "/home/user/deployments/"
                        echo "Binaries successfully transferred to remote machine."
                    }
                }
            }
        }
        stage('Upload Debs to Repository') {
            when {
                expression { env.CHANGE_BRANCH?.startsWith('rc') }
            }
            steps {
                script {
                    echo "Uploading deb packages to remote repository..."
                    withCredentials([usernamePassword(credentialsId: 'prod', passwordVariable: 'SSH_PASS', usernameVariable: 'SSH_USER')]) {
                        def remote = [:]
                        remote.name = "remote-server"
                        remote.host = "192.168.2.99"
                        remote.user = SSH_USER
                        remote.password = SSH_PASS
                        remote.allowAnyHosts = true
                        echo "Uploading deb packages using sshPut..."
                        sshPut remote: remote, from: "${env.DEB_PATH_AMD64}", into: "/home/user/repo/debs/"
                        sshPut remote: remote, from: "${env.DEB_PATH_RISCV64}", into: "/home/user/repo/debs/"
                        echo "Running repository update commands via sshCommand..."
                        sshCommand remote: remote, command: '''
export DEBIAN_FRONTEND=noninteractive
cd /home/user/repo/debs/
for deb in *.deb; do
reprepro -b /var/www/deb/debian/ includedeb stable $deb
done
rm -f *.deb
'''
                        echo "Deb packages successfully uploaded and added to the repository!"
                    }
                }
            }
        }
    }
    post {
        always {
            script {
                echo "Cleaning up workspace..."
                try {
                    if (fileExists("${env.WORKSPACE}/package/")) {
                        sh "rm -rf ${env.WORKSPACE}/package/"
                    }
                    if (fileExists("${env.WORKSPACE}/rc/")) {
                        sh "rm -rf ${env.WORKSPACE}/rc/"
                    }
                    sh "docker stop e-monitor && docker rm e-monitor"
                } catch (Exception e) {
                    echo "Failed to clean up workspace: ${e}"
                }
            }
        }
        success {
            script {
                // FIX: `when { expression { ... } }` is a stage-level directive and
                // is not legal inside a post/script block — at runtime Groovy tries
                // to invoke a `when` step and the post action fails. Guard with a
                // plain conditional instead.
                if (env.CHANGE_BRANCH?.startsWith('rc')) {
                    echo "Attempting to merge PR ${env.CHANGE_ID} into master..."
                    withCredentials([usernamePassword(credentialsId: 'gitea_creds', usernameVariable: 'GITEA_USER', passwordVariable: 'GITEA_PASS')]) {
                        def prId = env.CHANGE_ID
                        // NOTE(review): curl exits 0 on HTTP 4xx/5xx; consider
                        // adding --fail so a rejected merge fails the build.
                        sh """
curl -X POST \
-u "${GITEA_USER}:${GITEA_PASS}" \
-H "Content-Type: application/json" \
-d '{"Do":"merge"}' \
http://git.entcor/api/v1/repos/VladislavD/runner-rs/pulls/${prId}/merge
"""
                        echo "PR ${prId} merged successfully into master!"
                    }
                }
            }
        }
        failure {
            echo "Pipeline failed. Check the logs for details."
        }
        aborted {
            echo "Pipeline was aborted."
        }
    }
}

View File

@ -1,13 +1,16 @@
# runner-rs ( with amd64 and riscv64 support ) # noxis-rs
![Logo](https://blog.desdelinux.net/wp-content/uploads/2023/07/rust-logo.png) ![Logo](logo.png)
in-container integrating util to handle processes runtime ### In-container integrating util to handle processes runtime
( with amd64 and riscv64 support )
## Depends on ## Depends on
- `rustup (>=1.27.1)` - `rustup (>=1.27.1)`
- `gcc-riscv64-unknown-elf` - `gcc-riscv64-unknown-elf`
- `build-essential` - `build-essential`
- `gcc-riscv64-linux-gnu`
- `binutils-riscv64-linux-gnu`
## Setting up ## Setting up
Download and execute rustup.sh Download and execute rustup.sh
@ -29,7 +32,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
cd runner-rs/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu cd runner-rs/ && rustup target add riscv64gc-unknown-linux-gnu && rustup target add x86_64-unknown-linux-gnu
~~~ ~~~
> [!NOTE] > [!NOTE]
> Cargo is configured to build an app for amd64/linux defaultly. RISC-based compilation is optional. > Cargo is configured to build an app for amd64/linux defaultly. RISCV-based compilation is optional.
3.1. Release build of app for amd64/linux 3.1. Release build of app for amd64/linux

75
build.sh Executable file
View File

@ -0,0 +1,75 @@
#!/bin/bash
# Build script: compiles the project inside Docker and extracts the resulting
# binaries from the container.
# Usage: ./build.sh <architecture>
# Supported architectures: amd64, riscv64

if [ -z "$1" ]; then
    echo "Ошибка: Необходимо указать архитектуру (например, amd64 или riscv64)."
    exit 1
fi

ARCH="$1"
TARGET_DIR="release/${ARCH}"
CONTAINER_NAME="e-monitor"
SUPPORTED_ARCHS=("amd64" "riscv64")

if [[ ! " ${SUPPORTED_ARCHS[@]} " =~ " ${ARCH} " ]]; then
    echo "Ошибка: Неизвестная архитектура $ARCH. Допустимые значения: ${SUPPORTED_ARCHS[*]}."
    exit 1
fi

# Stop and remove the build container.
cleanup_container() {
    docker stop "$CONTAINER_NAME" && docker rm "$CONTAINER_NAME"
}

# Copy one release binary out of the container; on failure clean up and abort.
# $1 = binary name, $2 = rust target triple
copy_binary() {
    docker cp "$CONTAINER_NAME:/usr/src/kii/target/$2/release/$1" "$TARGET_DIR/" || {
        echo "Ошибка: Не удалось скопировать $1 для ${ARCH}."
        cleanup_container
        exit 1
    }
}

# In case a container with this name already exists.
# FIX: was hard-coded "e-monitor" instead of "$CONTAINER_NAME".
cleanup_container

echo "Building Docker image..."
docker build --network=host -t e-monitor . || {
    echo "Ошибка: Не удалось построить Docker-образ."
    exit 1
}

echo "Running Docker container..."
docker run --name "$CONTAINER_NAME" --dns 8.8.8.8 --network=host e-monitor:latest || {
    echo "Ошибка: Не удалось запустить Docker-контейнер."
    exit 1
}

echo "Creating target directory: $TARGET_DIR"
mkdir -p "$TARGET_DIR"

case "$ARCH" in
    amd64)
        echo "Copying binaries for architecture: amd64"
        copy_binary noxis-cli x86_64-unknown-linux-gnu
        copy_binary noxis-rs x86_64-unknown-linux-gnu
        ;;
    riscv64)
        echo "Copying binaries for architecture: riscv64"
        copy_binary noxis-cli riscv64gc-unknown-linux-gnu
        copy_binary noxis-rs riscv64gc-unknown-linux-gnu
        ;;
esac

echo "Stopping and removing Docker container..."
cleanup_container
echo "Build and extraction completed successfully for architecture: $ARCH"
exit 0

BIN
logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 152 KiB

12
noxis-cli/Cargo.toml Normal file
View File

@ -0,0 +1,12 @@
# Manifest for the noxis-cli binary (workspace member).
[package]
name = "noxis-cli"
version = "0.2.4"
edition = "2021"
[dependencies]
anyhow = "1.0.94"
# derive-based argument parsing for the CLI surface in src/cli.rs
clap = { version = "4.5.22", features = ["derive"] }
# serde/serde_json: parsed commands are serialized and sent to the daemon
serde = { version = "1.0.215", features = ["derive"] }
serde_json = "1.0.133"
thiserror = "2.0.11"
# "net" is already implied by "full"; kept as in the original manifest
tokio = { version = "1.42.0", features = ["full", "net"] }

145
noxis-cli/src/cli.rs Normal file
View File

@ -0,0 +1,145 @@
use clap::{Parser, Subcommand};

/// Top-level command-line interface of the Noxis CLI.
///
/// The struct is (de)serializable so a parsed invocation can be shipped
/// verbatim to the noxis-rs daemon over the CLI TCP pipeline.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct Cli {
    #[command(
        subcommand,
        help = "to manage Noxis work",
    )]
    command: Commands,
}

/// All top-level subcommands understood by the daemon.
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
pub enum Commands {
    #[command(
        about = "To get info about current Noxis status",
    )]
    Status,
    #[command(
        about = "To start Noxis process",
    )]
    Start(StartAction),
    #[command(
        about = "To stop Noxis process",
    )]
    Stop,
    #[command(
        about = "To restart Noxis process",
    )]
    Restart(StartAction),
    // FIX: "being monitoring" -> "being monitored" in user-facing help text.
    #[command(
        about = "To get list of processes that are being monitored",
    )]
    Processes,
    // process command
    #[command(
        about = "To manage current process that is being monitored",
    )]
    Process(ProcessCommand),
    // config command
    #[command(
        about = "To manage config settings",
    )]
    Config(ConfigCommand),
}

/// Optional flags forwarded to a (re)start request,
/// e.g. `noxis start --with-flags "-a -b"`.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct StartAction {
    #[arg(
        long = "with-flags",
        num_args = 1..,
        value_delimiter = ' '
    )]
    flags: Vec<String>,
}

/// Wrapper for `noxis config <action>`.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct ConfigCommand {
    #[command(subcommand)]
    action: ConfigAction,
}

/// Configuration management actions.
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
pub enum ConfigAction {
    #[command(
        about = "To change current Noxis configuration",
    )]
    Local(LocalConfig),
    #[command(
        about = "To change credentials of the remote config server",
    )]
    Remote,
    #[command(
        about = "To reset all config settings",
    )]
    Reset,
}

/// Payload of `noxis config local`: either a path to a config file or,
/// with `--json`, an inline JSON string.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct LocalConfig {
    // flag
    #[arg(
        long = "json",
        action,
        help = "to read following input as JSON",
    )]
    is_json: bool,
    // value
    #[arg(
        help = "path to config file or config String (with --json flag)",
    )]
    config: String,
}

/// Wrapper for `noxis process <name> <action>`.
#[derive(Debug, Parser, serde::Serialize, serde::Deserialize)]
pub struct ProcessCommand {
    #[arg(
        help = "name of needed process",
    )]
    process: String,
    #[command(
        subcommand,
        help = "To get current process's status",
    )]
    action: ProcessAction,
}

/// Actions applicable to a single monitored process.
#[derive(Debug, Subcommand, serde::Serialize, serde::Deserialize)]
enum ProcessAction {
    #[command(
        about = "To get info about current process status",
    )]
    Status,
    #[command(
        about = "To start current process",
    )]
    Start,
    #[command(
        about = "To stop current process",
    )]
    Stop,
    // FIX: "hybernaze" -> "hibernate" in user-facing help text.
    #[command(
        about = "To freeze (hibernate) current process",
    )]
    Freeze,
    #[command(
        about = "To unfreeze (unhibernate) current process",
    )]
    Unfreeze,
    #[command(
        about = "To restart current process",
    )]
    Restart,
    #[command(
        about = "To get info about current process's dependencies",
    )]
    Deps,
    #[command(
        about = "To get info about current process's files-dependencies",
    )]
    Files,
    #[command(
        about = "To get info about current process's services-dependencies",
    )]
    Services,
}

View File

@ -0,0 +1,14 @@
use thiserror::Error;
use super::cli_net::NOXIS_RS_CREDS;
#[derive(Debug, Error)]
pub enum NoxisCliError {
#[error("Can't send any data to {:?}. Noxis-rs daemon is disabled or can't be accessed", NOXIS_RS_CREDS)]
NoxisDaemonMissing,
#[error("Noxis CLI can't write any data to the Noxis-rs port. Check daemon and it's web-functionality")]
PortIsNotWritable,
#[error("Can't send Cli-prompt to the Noxis-rs. Check it's state")]
CliPromptCanNotBeSent,
#[error("Can't parse CLI struct and send as byte stream")]
ToStringCliParsingParsing,
}

32
noxis-cli/src/cli_net.rs Normal file
View File

@ -0,0 +1,32 @@
use tokio::net::TcpStream;
use tokio::io::AsyncWriteExt;
use tokio::time::{Duration, sleep};
use anyhow::Result;
use super::Cli;
use super::cli_error::NoxisCliError;
pub const NOXIS_RS_CREDS: &str = "127.0.0.1:7753";
/// Opens a TCP connection to the local noxis-rs daemon.
///
/// A failed connect is reported as [`NoxisCliError::NoxisDaemonMissing`].
pub async fn create_tcp_stream() -> Result<TcpStream> {
    let connected = TcpStream::connect(NOXIS_RS_CREDS).await;
    let stream = connected.map_err(|_| NoxisCliError::NoxisDaemonMissing)?;
    Ok(stream)
}
/// Serializes `params` to JSON and writes it to the daemon socket.
///
/// Retries every 100 ms until the socket reports itself writable, then
/// performs a single `write_all` and returns. Errors are mapped onto
/// `NoxisCliError` variants so callers get actionable messages.
pub async fn try_send(stream: Result<TcpStream>, params: Cli) -> Result<()> {
    use serde_json::to_string;

    let mut stream = stream.map_err(|_| NoxisCliError::NoxisDaemonMissing)?;
    // FIX: serialization is loop-invariant — hoisted out of the retry loop
    // (it was recomputed on every writability retry). Dead commented-out
    // code removed.
    let msg = to_string(&params).map_err(|_| NoxisCliError::ToStringCliParsingParsing)?;
    loop {
        // Back off briefly while the socket is not ready for writing.
        if stream.writable().await.is_err() {
            sleep(Duration::from_millis(100)).await;
            continue;
        }
        stream
            .write_all(msg.as_bytes())
            .await
            .map_err(|_| NoxisCliError::CliPromptCanNotBeSent)?;
        break;
    }
    Ok(())
}

5
noxis-cli/src/lib.rs Normal file
View File

@ -0,0 +1,5 @@
//! noxis-cli library surface: exposes the CLI type definitions so the
//! noxis-rs daemon can deserialize incoming commands.
mod cli;
mod cli_net;
mod cli_error;
// Only the CLI data types are public; networking and error internals stay private.
pub use cli::*;

15
noxis-cli/src/main.rs Normal file
View File

@ -0,0 +1,15 @@
// noxis-cli binary entry point: parses the command line and forwards the
// parsed command to the noxis-rs daemon over TCP.
mod cli;
mod cli_net;
mod cli_error;
use clap::Parser;
use cli::Cli;
use cli_net::{create_tcp_stream, try_send};
use anyhow::Result;
/// Parses CLI arguments and ships them to the daemon; connection and
/// serialization failures propagate as `anyhow` errors.
#[tokio::main]
async fn main() -> Result<()>{
    let cli = Cli::parse();
    // The connect Result is passed through so try_send can map a connect
    // failure to NoxisCliError::NoxisDaemonMissing.
    try_send(create_tcp_stream().await, cli).await?;
    Ok(())
}

20
noxis-rs/Cargo.toml Normal file
View File

@ -0,0 +1,20 @@
# Manifest for the noxis-rs daemon (workspace member).
[package]
name = "noxis-rs"
version = "0.11.10"
edition = "2021"
[dependencies]
anyhow = "1.0.93"
chrono = "0.4.38"
clap = { version = "4.5.21", features = ["derive"] }
env_logger = "0.11.3"
inotify = "0.10.2"
log = "0.4.22"
pcap = "2.2.0"
redis = "0.25.4"
serde = { version = "1.0.203", features = ["derive"] }
serde_json = "1.0.118"
sysinfo = "0.32.0"
# NOTE(review): "time" is already implied by "full" — confirm before dropping.
tokio = { version = "1.38.0", features = ["full", "time"] }
# Shared CLI types: the daemon deserializes commands sent by noxis-cli.
noxis-cli = { path = "../noxis-cli" }
dotenv = "0.15.0"

View File

@ -4,12 +4,12 @@
"processes": [ "processes": [
{ {
"name": "temp-process", "name": "temp-process",
"path": "/home/user/monitor/runner-rs/temp-process", "path": "./temp-process",
"dependencies": { "dependencies": {
"files": [ "files": [
{ {
"filename": "dep-file", "filename": "dep-file",
"src": "/home/user/monitor/runner-rs/tests/examples/", "src": "./tests/examples/",
"triggers": { "triggers": {
"onDelete": "stop", "onDelete": "stop",
"onChange": "stay" "onChange": "stay"
@ -31,4 +31,3 @@
} }
] ]
} }

View File

@ -1,27 +1,33 @@
mod options; mod options;
mod utils; mod utils;
use anyhow::Error;
use clap::Parser;
use log::{error, info}; use log::{error, info};
use options::config::*; use options::config::*;
use options::logger::setup_logger; use options::logger::setup_logger;
use options::signals::set_valid_destructor; use options::signals::set_valid_destructor;
use options::structs::*; use options::structs::Processes;
use options::cli_pipeline::init_cli_pipeline;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use utils::*; use utils::*;
use options::preboot::PrebootParams;
#[tokio::main(flavor = "multi_thread")] #[tokio::main(flavor = "multi_thread")]
async fn main() { async fn main() -> anyhow::Result<()>{
let preboot = Arc::new(PrebootParams::parse().validate()?);
let _ = setup_logger(); let _ = setup_logger();
info!("Runner is configurating..."); info!("Runner is configurating...");
// setting up redis connection \ // setting up redis connection \
// then conf checks to choose the most actual \ // then conf checks to choose the most actual \
let processes: Processes = get_actual_config().await.unwrap_or_else(|| { let processes: Processes = get_actual_config(preboot.clone()).await.unwrap_or_else(|| {
error!("No actual configuration for runner. Stopping..."); error!("No actual configuration for runner. Stopping...");
std::process::exit(101); std::process::exit(1);
}); });
info!( info!(
@ -32,7 +38,7 @@ async fn main() {
if processes.processes.is_empty() { if processes.processes.is_empty() {
error!("Processes list is null, runner-rs initialization is stopped"); error!("Processes list is null, runner-rs initialization is stopped");
return; return Err(Error::msg("Empty processes segment in config"));
} }
let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![]; let mut handler: Vec<tokio::task::JoinHandle<()>> = vec![];
// is in need to send to the signals handler thread // is in need to send to the signals handler thread
@ -75,13 +81,18 @@ async fn main() {
// remote config update subscription // remote config update subscription
handler.push(tokio::spawn(async move { handler.push(tokio::spawn(async move {
let _ = subscribe_config_stream(Arc::new(processes)).await; let _ = subscribe_config_stream(Arc::new(processes), preboot.clone()).await;
}));
// cli pipeline
handler.push(tokio::spawn(async move {
let _ = init_cli_pipeline().await;
})); }));
for i in handler { for i in handler {
let _ = i.await; let _ = i.await;
} }
return; Ok(())
} }
// todo: integration tests // todo: integration tests

View File

@ -4,3 +4,5 @@ pub mod config;
pub mod logger; pub mod logger;
pub mod signals; pub mod signals;
pub mod structs; pub mod structs;
pub mod preboot;
pub mod cli_pipeline;

View File

@ -0,0 +1,106 @@
use log::{error, info, warn};
use tokio::net::{TcpListener, TcpStream};
use anyhow::{Result as DynResult, Error};
use tokio::time::{sleep, Duration};
use std::{borrow::BorrowMut, net::{IpAddr, Ipv4Addr}};
// use std::io::BufReader;
use tokio::io::{BufReader, AsyncWriteExt, AsyncBufReadExt};
use noxis_cli::Cli;
use serde_json::from_str;
/// # Fn `init_cli_pipeline`
/// ## for catching all input requests from CLI
///
/// *input* : -
///
/// *output* : `anyhow::Result<()>` to wrap errors
///
/// *initiator* : fn `main`
///
/// *managing* : `TcpListener` object to handle requests
///
/// *depends on* : -
///
pub async fn init_cli_pipeline() -> DynResult<()> {
match init_listener().await {
Some(list) => {
loop {
if let Ok((socket, addr)) = list.accept().await {
// isolation
if IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)) != addr.ip() {
warn!("Declined attempt to connect TCP-socket from {}", addr);
continue;
}
process_connection(socket).await;
}
sleep(Duration::from_millis(500)).await;
}
// Ok(())
},
None => Err(Error::msg("Addr 127.0.0.1:7753 is already in use"))
}
}
/// # Fn `init_listener`
/// ## for creating TCP-listener for communicating with CLI
///
/// *input* : -
///
/// *output* : `Some(TcpListener)` when port 7753 was bound | `None` otherwise
///
/// *initiator* : fn `init_cli_pipeline`
///
/// *managing* : `TcpListener` object to handle requests
///
/// *depends on* : `tokio::net::TcpListener`
///
async fn init_listener() -> Option<TcpListener> {
    if let Ok(listener) = TcpListener::bind("127.0.0.1:7753").await {
        info!("Runner is listening localhost:7753");
        return Some(listener);
    }
    error!("Cannot create TCP listener for CLI");
    None
}
/// # Fn `process_connection`
/// ## for processing input CLI requests
///
/// *input* : mut stream: `TcpStream`
///
/// *output* : -
///
/// *initiator* : fn `init_cli_pipeline`
///
/// *managing* : mutable object of `TcpStream`
///
/// *depends on* : `tokio::net::TcpStream`
///
async fn process_connection(mut stream: TcpStream) {
let buf_reader = BufReader::new(stream.borrow_mut());
let mut rqst = buf_reader.lines();
while let Ok(Some(line)) = rqst.next_line().await {
if line.is_empty() {
break
}
match from_str::<Cli>(&line) {
Ok(req) => {
// TODO: func wrapper
dbg!(req);
},
Err(_) => {
break
},
}
println!("{}", line);
}
let response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\nContent-Type: text/plain\r\n\r\nHello, World!";
stream.write_all(response.as_bytes()).await.unwrap();
}

View File

@ -1,4 +1,4 @@
use crate::options::structs::*; use super::structs::*;
use log::{error, info, warn}; use log::{error, info, warn};
use redis::{Client, Connection}; use redis::{Client, Connection};
use std::fs::OpenOptions; use std::fs::OpenOptions;
@ -7,9 +7,10 @@ use std::os::unix::process::CommandExt;
use std::process::Command; use std::process::Command;
use std::sync::Arc; use std::sync::Arc;
use std::{env, fs}; use std::{env, fs};
use tokio::time::Duration; use super::preboot::PrebootParams;
use tokio::time::{Duration, sleep};
const CONFIG_PATH: &str = "settings.json"; // const CONFIG_PATH: &str = "settings.json";
/// # Fn `load_processes` /// # Fn `load_processes`
/// ## for reading and parsing *local* storing config /// ## for reading and parsing *local* storing config
@ -46,18 +47,24 @@ fn load_processes(json_filename: &str) -> Option<Processes> {
/// ///
/// *depends on* : struct `Processes` /// *depends on* : struct `Processes`
/// ///
pub async fn get_actual_config() -> Option<Processes> { pub async fn get_actual_config(params : Arc<PrebootParams>) -> Option<Processes> {
// * if no local conf -> loop and +inf getting conf from redis server // * if no local conf -> loop and +inf getting conf from redis server
// * if local conf -> once getting conf from redis server // * if local conf -> once getting conf from redis server
match load_processes(CONFIG_PATH) { let config_path = params.config.to_str().unwrap_or_else(|| {
error!("Invalid character in config file. Config path was set to default");
"settings.json"
});
info!("Configurating config module with params: no-remote-config={}, no-sub={}, local config path={:?}, remote server={}", params.no_remote_config, params.no_sub, params.config, params.remote_server_url);
match load_processes(config_path) {
Some(local_conf) => { Some(local_conf) => {
info!( info!(
"Found local configuration, version - {}", "Found local configuration, version - {}",
&local_conf.date_of_creation &local_conf.date_of_creation
); );
if !params.no_remote_config {
if let Some(remote_conf) = if let Some(remote_conf) =
// TODO : rework with pubsub mech // TODO : rework with pubsub mech
once_get_remote_configuration(&format!("redis://{}/", local_conf.config_server)) once_get_remote_configuration(&format!("redis://{}/", &params.remote_server_url))
{ {
return match config_comparing(&local_conf, &remote_conf) { return match config_comparing(&local_conf, &remote_conf) {
ConfigActuality::Local => { ConfigActuality::Local => {
@ -66,24 +73,26 @@ pub async fn get_actual_config() -> Option<Processes> {
} }
ConfigActuality::Remote => { ConfigActuality::Remote => {
info!("Pulled config is more actual. Saving changes!"); info!("Pulled config is more actual. Saving changes!");
if save_new_config(&remote_conf, CONFIG_PATH).is_err() { if save_new_config(&remote_conf, config_path).is_err() {
error!("Saving changes process failed due to unexpected error...") error!("Saving changes process failed due to unexpected error...")
} }
Some(remote_conf) Some(remote_conf)
} }
}; };
} }
}
Some(local_conf) Some(local_conf)
} }
None => { None => {
warn!("No local valid conf was found. Trying to pull remote one..."); warn!("No local valid conf was found. Trying to pull remote one...");
let mut conn = get_connection_watcher(&open_watcher("redis://localhost/")); if !params.no_remote_config {
let remote_config = get_remote_conf_watcher(&mut conn).await; let mut conn = get_connection_watcher(&open_watcher(&format!("redis://{}/", &params.remote_server_url)));
if let Some(conf) = remote_config { if let Some(conf) = get_remote_conf_watcher(&mut conn).await {
info!("Config {} was pulled from Redis-Server. Starting...", &conf.date_of_creation); info!("Config {} was pulled from Redis-Server. Starting...", &conf.date_of_creation);
let _ = save_new_config(&conf, CONFIG_PATH); let _ = save_new_config(&conf, config_path);
return Some(conf); return Some(conf);
} }
}
None None
} }
} }
@ -182,23 +191,22 @@ fn once_get_remote_configuration(serv_info: &str) -> Option<Processes> {
if remote.is_none() { if remote.is_none() {
error!("Pulled config is invalid. Check it in Redis Server"); error!("Pulled config is invalid. Check it in Redis Server");
} }
return remote; remote
}, },
Err(_) => { Err(_) => {
error!("Cannot extract payload from new message. Check Redis Server state"); error!("Cannot extract payload from new message. Check Redis Server state");
return None; None
}, },
} }
}, },
Err(_) => { Err(_) => {
warn!("Cannot get config from Redis Server. Empty channel"); None
return None;
}, },
} }
}, },
Err(_) => { Err(_) => {
error!("Redis subscription process failed. Check Redis configuration!"); error!("Redis subscription process failed. Check Redis configuration!");
return None; None
} }
} }
} }
@ -311,8 +319,13 @@ fn restart_main_thread() -> std::io::Result<()> {
/// ///
/// *depends on* : `Processes` /// *depends on* : `Processes`
/// ///
pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(), CustomError> { pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>, params: Arc<PrebootParams>) -> Result<(), CustomError> {
if let Ok(client) = Client::open(format!("redis://{}/", &actual_prcs.config_server)) { let config_path = params.config.to_str().unwrap_or_else(|| "settings.json");
if params.no_sub || params.no_remote_config {
return Err(CustomError::Fatal);
}
if let Ok(client) = Client::open(format!("redis://{}/", &params.remote_server_url)) {
if let Ok(mut conn) = client.get_connection() { if let Ok(mut conn) = client.get_connection() {
match crate::utils::get_container_id() { match crate::utils::get_container_id() {
Some(channel_name) => { Some(channel_name) => {
@ -322,7 +335,6 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(),
info!("Runner subscribed on config update publishing in channel {}", &channel_name); info!("Runner subscribed on config update publishing in channel {}", &channel_name);
loop { loop {
if let Ok(msg) = pubsub.get_message() { if let Ok(msg) = pubsub.get_message() {
info!("New config was pulled from Redis Server");
let get_remote_config: Result<String, redis::RedisError> = msg.get_payload(); let get_remote_config: Result<String, redis::RedisError> = msg.get_payload();
match get_remote_config { match get_remote_config {
Ok(payload) => { Ok(payload) => {
@ -330,8 +342,8 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(),
match config_comparing(&actual_prcs, &remote_config) { match config_comparing(&actual_prcs, &remote_config) {
ConfigActuality::Remote => { ConfigActuality::Remote => {
warn!("Pulled config is actual. Saving and restarting..."); warn!("Pulled config is actual. Saving and restarting...");
if save_new_config(&remote_config, CONFIG_PATH).is_err() { if save_new_config(&remote_config, config_path).is_err() {
error!("Error with saving new config to {}. Stopping sub mechanism...", &CONFIG_PATH); error!("Error with saving new config to {}. Stopping sub mechanism...", config_path);
return Err(CustomError::Fatal); return Err(CustomError::Fatal);
} }
if restart_main_thread().is_err() { if restart_main_thread().is_err() {
@ -339,7 +351,10 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(),
return Err(CustomError::Fatal); return Err(CustomError::Fatal);
} }
} }
_ => continue, _ => {
warn!("Pulled new config. Current config is more actual ...");
continue
},
} }
} }
else { else {
@ -352,7 +367,7 @@ pub async fn subscribe_config_stream(actual_prcs: Arc<Processes>) -> Result<(),
}, },
} }
} }
tokio::time::sleep(tokio::time::Duration::from_secs(30)).await; sleep(Duration::from_secs(30)).await;
} }
} else { } else {
error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name); error!("Cannot subscribe channel {}. Check Redis Server status", &channel_name);
@ -433,7 +448,7 @@ fn save_new_config(config: &Processes, config_file: &str) -> Result<(), CustomEr
Err(_) => Err(CustomError::Fatal), Err(_) => Err(CustomError::Fatal),
} }
} }
Err(_) => return Err(CustomError::Fatal), Err(_) => Err(CustomError::Fatal),
} }
} }
Err(_) => Err(CustomError::Fatal), Err(_) => Err(CustomError::Fatal),

View File

@ -61,8 +61,27 @@ pub fn setup_logger() -> Result<(), crate::options::structs::CustomError> {
#[cfg(test)] #[cfg(test)]
mod logger_tests { mod logger_tests {
use super::*; use super::*;
// #[test]
// fn setting_up_logger() {
// assert!(setup_logger().is_ok());
// }
#[test] #[test]
fn setting_up_logger() { fn setting_up_logger() {
assert!(setup_logger().is_ok()); Builder::new()
.format(move |buf, record| {
writeln!(
buf,
"|{}| {} [{}] - {}",
get_container_id().unwrap_or("NODE".to_string()).trim(),
Local::now().format("%d-%m-%Y %H:%M:%S"),
record.level(),
record.args(),
)
})
.filter(None, LevelFilter::Info)
.target(env_logger::Target::Stdout)
.is_test(true)
.init();
} }
} }

View File

@ -0,0 +1,382 @@
// module to handle pre-boot params of the monitor
#[allow(unused_imports)]
use anyhow::{Result, Ok, Error};
use clap::Parser;
use std::path::PathBuf;
use std::env::var;
use dotenv::dotenv;
const SOCKET_PATH: &str = "/var/run/enode/hostagent.sock";
/// Environment variables mirroring every preboot CLI parameter.
/// Each variant maps 1:1 to a `PrebootParams` field; the `Display` impl
/// yields the concrete variable name (e.g. `NOXIS_NO_LOGS`).
enum EnvVars {
    NoxisNoHagent,
    NoxisNoLogs,
    NoxisRefreshLogs,
    NoxisNoRemoteConfig,
    NoxisNoConfigSub,
    NoxisSocketPath,
    NoxisLogTo,
    NoxisRemoteServerUrl,
    NoxisConfig,
    NoxisMetrics,
}
/// Maps each variant to its environment-variable name, enabling
/// `to_string()` on `EnvVars` values.
impl std::fmt::Display for EnvVars {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let name = match self {
            EnvVars::NoxisNoHagent => "NOXIS_NO_HAGENT",
            EnvVars::NoxisNoLogs => "NOXIS_NO_LOGS",
            EnvVars::NoxisRefreshLogs => "NOXIS_REFRESH_LOGS",
            EnvVars::NoxisNoRemoteConfig => "NOXIS_NO_REMOTE_CONFIG",
            EnvVars::NoxisNoConfigSub => "NOXIS_NO_CONFIG_SUB",
            EnvVars::NoxisSocketPath => "NOXIS_SOCKET_PATH",
            EnvVars::NoxisLogTo => "NOXIS_LOG_TO",
            EnvVars::NoxisRemoteServerUrl => "NOXIS_REMOTE_SERVER_URL",
            EnvVars::NoxisConfig => "NOXIS_CONFIG",
            EnvVars::NoxisMetrics => "NOXIS_METRICS",
        };
        write!(f, "{}", name)
    }
}
/// Helpers to synchronise preboot parameters with process environment variables.
impl EnvVars {
    /// Built-in default value for each variable (mirrors the clap defaults).
    // The `Default` trait cannot express a per-variant `&'static str`, hence
    // an inherent method. The unconstrained `impl<'a>` lifetime was removed:
    // all arms are string literals, so `'static` is the honest return type.
    fn default(self) -> &'static str {
        match self {
            EnvVars::NoxisNoHagent => "false",
            EnvVars::NoxisNoLogs => "false",
            EnvVars::NoxisRefreshLogs => "false",
            EnvVars::NoxisNoRemoteConfig => "false",
            EnvVars::NoxisNoConfigSub => "false",
            EnvVars::NoxisSocketPath => "/var/run/enode/hostagent.sock",
            EnvVars::NoxisLogTo => "./",
            EnvVars::NoxisRemoteServerUrl => "localhost",
            EnvVars::NoxisConfig => "./settings.json",
            EnvVars::NoxisMetrics => "full",
        }
    }
    /// Reconcile one env var with the value derived from preboot params:
    /// unset vars receive `preboot_value`.
    fn process_env_var(self, preboot_value: &str) {
        match var(self.to_string()) {
            std::result::Result::Ok(val) => {
                if val != preboot_value {
                    // NOTE(review): a mismatching existing var is reset to the
                    // built-in default, NOT to the preboot value — confirm this
                    // precedence is intended.
                    std::env::set_var(self.to_string(), self.default());
                }
            },
            Err(_) => {
                std::env::set_var(self.to_string(), preboot_value);
            },
        }
    }
    /// Push the validated preboot state into the process environment.
    pub fn setup(preboot: &PrebootParams) {
        // setup default if not exists
        // check values and save preboot states in env vars if not equal
        Self::NoxisNoHagent.process_env_var(&preboot.no_hostagent.to_string());
        Self::NoxisNoLogs.process_env_var(&preboot.no_logs.to_string());
        Self::NoxisRefreshLogs.process_env_var(&preboot.refresh_logs.to_string());
        Self::NoxisNoRemoteConfig.process_env_var(&preboot.no_remote_config.to_string());
        Self::NoxisNoConfigSub.process_env_var(&preboot.no_sub.to_string());
        // to_string_lossy instead of to_str().unwrap(): a non-UTF-8 path must
        // not panic the boot sequence
        Self::NoxisSocketPath.process_env_var(&preboot.socket_path.to_string_lossy());
        Self::NoxisLogTo.process_env_var(&preboot.log_to.to_string_lossy());
        Self::NoxisRemoteServerUrl.process_env_var(&preboot.remote_server_url);
        Self::NoxisConfig.process_env_var(&preboot.config.to_string_lossy());
        Self::NoxisMetrics.process_env_var(&preboot.metrics.to_string());
    }
}
/// # Enum `MetricsPrebootParams`
/// ## for setting up metrics mode as preboot param from command prompt
///
/// Parsed by clap's `ValueEnum` derive, so the lowercase variant name is the
/// accepted CLI value.
///
/// examples:
/// ``` bash
/// noxis-rs ... --metrics full
/// noxis-rs ... --metrics system
/// noxis-rs ... --metrics processes
/// noxis-rs ... --metrics net
/// noxis-rs ... --metrics none
/// ```
///
#[derive(clap::ValueEnum, Debug, Clone)]
pub enum MetricsPrebootParams {
    Full,
    System,
    Processes,
    Net,
    None,
}
/// # `std::fmt::Display` implementation for `MetricsPrebootParams`
/// ## to enable parsing object to String
impl std::fmt::Display for MetricsPrebootParams {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
MetricsPrebootParams::Full => write!(f, "full"),
MetricsPrebootParams::System => write!(f, "system"),
MetricsPrebootParams::Processes => write!(f, "processes"),
MetricsPrebootParams::Net => write!(f, "net"),
MetricsPrebootParams::None => write!(f, "none"),
}
}
}
/// # struct `PrebootParams`
/// ## to parse and set up all modes as preboot params from command prompt
///
/// ### args :
///
/// `--no-hagent` - to disable hagent work module and set up work mode as autonomous
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-hagent ...
/// ```
///
///
/// `--no-logs` - to disable logging at all
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-logs ...
/// ```
///
/// `--refresh-logs` - to truncate logs directory
/// ### usage :
/// ``` bash
/// noxis-rs ... --refresh-logs ...
/// ```
///
/// `--no-remote-config` - to disable work with Redis as config producer
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-remote-config ...
/// ```
///
/// `--no-sub` - to disable Redis subscription mechanism
/// ### usage :
/// ``` bash
/// noxis-rs ... --no-sub ...
/// ```
///
/// `--socket-path` - to set Unix Domain Socket file's directory
/// ### usage :
/// ``` bash
/// noxis-rs ... --socket-path /var/run/enode/hostagent.sock ...
/// ```
///
/// `--log-to` - to set directory for logs
/// ### usage :
/// ``` bash
/// noxis-rs ... --log-to /dir/to/logs/ ...
/// ```
///
/// `--remote-server-url` - to set Redis Server
/// ### usage :
/// ``` bash
/// noxis-rs ... --remote-server-url 192.168.28.12 ...
/// ```
///
/// `--config` - to set Noxis' config full path
/// ### usage :
/// ``` bash
/// noxis-rs ... --config /etc/enode/settings.json ...
/// ```
///
/// `--metrics` - to set metrics mode
/// ### usage :
/// ``` bash
/// noxis-rs ... --metrics full ...
/// ```
#[derive(Debug, Parser)]
pub struct PrebootParams {
    // action flags (mutually exclusive with their path/url counterparts)
    #[arg(
        long = "no-hagent",
        action,
        conflicts_with="socket_path",
        help="To disable work with host-agent"
    )]
    pub no_hostagent : bool,
    #[arg(
        long = "no-logs",
        action,
        conflicts_with="log_to",
        help="To disable logs"
    )]
    pub no_logs: bool,
    #[arg(
        long = "refresh-logs",
        action,
        conflicts_with="no_logs",
        help="To clear logs directory"
    )]
    pub refresh_logs : bool,
    #[arg(
        long = "no-remote-config",
        action,
        help="To disable work with remote config server",
        conflicts_with="no_sub")]
    pub no_remote_config : bool,
    #[arg(
        long = "no-sub",
        action,
        help="To disable subscription mechanism",
        conflicts_with="no_remote_config")]
    pub no_sub : bool,
    // params (socket_path, log_to, remote_server_url, config)
    #[arg(
        long = "socket-path",
        default_value="/var/run/enode/hostagent.sock",
        conflicts_with="no_hostagent",
        help="To set .sock file's path used in communication with host-agent"
    )]
    pub socket_path : PathBuf,
    #[arg(
        long = "log-to",
        default_value="./",
        conflicts_with="no_logs",
        help="To set a path to logs directory"
    )]
    pub log_to : PathBuf,
    #[arg(
        long = "remote-server-url",
        default_value="localhost",
        conflicts_with="no_remote_config",
        help = "To set url of remote config server using in remote config pulling mechanism"
    )]
    pub remote_server_url : String,
    #[arg(
        long = "config",
        short,
        default_value="settings.json",
        help="To set local config file path"
    )]
    pub config : PathBuf,
    // value enum params (metrics)
    #[arg(
        long = "metrics",
        short,
        default_value_t=MetricsPrebootParams::Full,
        help="To set metrics grubbing mode"
    )]
    pub metrics: MetricsPrebootParams,
}
/// # implementation for `PrebootParams`
/// ## to enable validation mechanism
impl PrebootParams {
    /// Validates parsed CLI params against the host environment, falling back
    /// to defaults where possible; fails only when no config source remains.
    /// Finally mirrors the effective state into env vars via `EnvVars::setup`.
    pub fn validate(mut self) -> Result<Self> {
        dotenv().ok();
        // socket file must exist unless the hostagent module is disabled
        if !self.socket_path.exists() && !self.no_hostagent {
            if self.socket_path.to_string_lossy() == SOCKET_PATH {
                self.no_hostagent = true;
                eprintln!("Warning: Socket-file wasn't found. Working without hostagent module...");
            } else {
                eprintln!("Warning: Socket-file wasn't found or Noxis can't read it. Socket-file was set to default");
                if !PathBuf::from(SOCKET_PATH).exists() {
                    self.no_hostagent = true;
                    eprintln!("Warning: Socket-file wasn't found. Working without hostagent module...");
                } else {
                    self.socket_path = PathBuf::from(SOCKET_PATH);
                }
            }
        }
        // log directory must exist unless logging is disabled
        if !self.log_to.exists() && !self.no_logs {
            eprintln!("Error: Log-Dir not found or Noxis can't read it. LogDir was set to default");
            self.log_to = PathBuf::from("./");
        }
        // local config file check (message fixed: the file is missing, not malformed)
        if !self.config.exists() {
            eprintln!("Error: Config file not found or Noxis can't read it. Config path was set to default");
            let fallback = PathBuf::from("/etc/settings.json");
            // NOTE(review): /etc/settings.json is only an existence check; config
            // is then always reset to ./settings.json — confirm that is intended.
            if !fallback.exists() && self.no_remote_config {
                return Err(Error::msg("Noxis cannot run without config. Create local config or enable remote-config mechanism"));
            }
            self.config = PathBuf::from("settings.json");
        }
        // persist the effective params into env vars
        EnvVars::setup(&self);
        Ok(self)
    }
}
// unit tests of preboot params parsing mech
#[cfg(test)]
mod preboot_unitests {
    use super::*;

    /// true when the given argv parses without a clap error
    fn parses(args: &[&str]) -> bool {
        PrebootParams::try_parse_from(args.iter().copied()).is_ok()
    }

    #[test]
    fn parsing_zero_args() {
        assert!(parses(&["runner-rs"]))
    }
    #[test]
    fn parsing_hagent_valid_args() {
        assert!(parses(&["runner-rs", "--socket-path", "/path/to/socket"]))
    }
    #[test]
    fn parsing_hagent_invalid_args() {
        // --socket-path conflicts with --no-hagent
        assert!(!parses(&["runner-rs", "--socket-path", "/path/to/socket", "--no-hagent"]))
    }
    #[test]
    fn parsing_log_valid_args() {
        assert!(parses(&["runner-rs", "--log-to", "/path/to/log/dir"]))
    }
    #[test]
    fn parsing_log_invalid_args() {
        // --log-to conflicts with --no-logs
        assert!(!parses(&["runner-rs", "--log-to /path/to/log/dir", "--no-logs"]))
    }
    #[test]
    fn parsing_config_valid_args() {
        assert!(parses(&["runner-rs", "--no-sub", "--remote-server-url", "redis://127.0.0.1"]))
    }
    #[test]
    fn parsing_config_invalid_args_noremote_nosub() {
        assert!(!parses(&["runner-rs", "--no-remote-config", "--no-sub"]))
    }
    #[test]
    fn parsing_config_invalid_args_noremote_remoteurl() {
        assert!(!parses(&["runner-rs", "--no-remote-config", "--remote-server-url", "redis://127.0.0.1"]))
    }
    #[test]
    fn parsing_metrics_args_using_value_enum() {
        for mode in ["full", "system", "processes", "net", "none"] {
            assert!(parses(&["runner-rs", "--metrics", mode]));
        }
        assert!(!parses(&["runner-rs", "--metrics", "unusual_value"]));
    }
}

View File

@ -1,4 +1,4 @@
use crate::options::structs::CustomError; use super::structs::CustomError;
use std::sync::Arc; use std::sync::Arc;
use tokio::io; use tokio::io;
use tokio::sync::mpsc; use tokio::sync::mpsc;

View File

@ -1,3 +1,5 @@
#![allow(dead_code)]
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -5,6 +7,7 @@ use serde::{Deserialize, Serialize};
pub enum CustomError { pub enum CustomError {
Fatal, Fatal,
} }
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum ConfigActuality { pub enum ConfigActuality {
Local, Local,
@ -18,7 +21,7 @@ pub enum ConfigActuality {
/// ///
/// *depends on* : `TrackingProcess` /// *depends on* : `TrackingProcess`
/// ///
/// ``` /// ``` json
/// { /// {
/// -> "dateOfCreation": "1721381809104", /// -> "dateOfCreation": "1721381809104",
/// -> "configServer": "localhost", /// -> "configServer": "localhost",
@ -44,7 +47,7 @@ pub struct Processes {
/// ///
/// *depends on* : `Dependencies` /// *depends on* : `Dependencies`
/// ///
/// ``` /// ``` json
/// ... /// ...
/// "processes": [ /// "processes": [
/// -> { /// -> {
@ -69,7 +72,7 @@ pub struct TrackingProcess {
/// ///
/// *depends on* : `Files`, `Services` /// *depends on* : `Files`, `Services`
/// ///
/// ``` /// ``` json
/// ... /// ...
/// "path": "/home/user/monitor/runner-rs/temp-process", /// "path": "/home/user/monitor/runner-rs/temp-process",
/// -> "dependencies": { /// -> "dependencies": {
@ -93,7 +96,7 @@ pub struct Dependencies {
/// ///
/// *depends on* : `FileTriggers` /// *depends on* : `FileTriggers`
/// ///
/// ``` /// ``` json
/// ... /// ...
/// "files": [ /// "files": [
/// -> { /// -> {
@ -118,7 +121,7 @@ pub struct Files {
/// ///
/// *depends on* : `ServiceTriggers` /// *depends on* : `ServiceTriggers`
/// ///
/// ``` /// ``` json
/// ... /// ...
/// "services": [ /// "services": [
/// -> { /// -> {
@ -143,7 +146,7 @@ pub struct Services {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
/// ``` /// ``` json
/// ... /// ...
/// "port": 443, /// "port": 443,
/// -> "triggers": { /// -> "triggers": {
@ -168,7 +171,7 @@ pub struct ServiceTriggers {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
/// ``` /// ``` json
/// ... /// ...
/// "src": "/home/user/monitor/runner-rs/tests/examples/", /// "src": "/home/user/monitor/runner-rs/tests/examples/",
/// -> "triggers": { /// -> "triggers": {

272
noxis-rs/src/utils.rs Normal file
View File

@ -0,0 +1,272 @@
pub mod files;
pub mod hagent;
pub mod metrics;
pub mod prcs;
pub mod services;
// TODO : saving current flags state
use crate::options::structs::CustomError;
use crate::options::structs::TrackingProcess;
use files::create_watcher;
use files::file_handler;
use inotify::Inotify;
use log::{error, warn};
use prcs::{
freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process,
unfreeze_process,
};
use services::service_handler;
use std::process::Command;
use std::sync::Arc;
use tokio::join;
use tokio::sync::mpsc;
use tokio::time::Duration;
const GET_ID_CMD: &str = "hostname";
/// # Fn `run_daemons`
/// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to given messages into channel
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `&mut mpsc::Receiver<u8>`,
///
/// *output* : ()
///
/// *initiator* : main thread
///
/// *managing* : Arc to current process struct, Arc to managing channel writer, mut ref to managing channel reader
///
/// *depends on* : all module `prcs`'s functions, fn `running_handler`, fn `utils::files::create_watcher`
///
/// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process
///
pub async fn run_daemons(
    proc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    rx: &mut mpsc::Receiver<u8>,
) {
    // one inotify watcher per file dependency; code 121 signals creation failure
    let mut watchers: Vec<Inotify> = vec![];
    for file in proc.dependencies.files.clone().into_iter() {
        if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
            watchers.push(watcher);
        } else {
            let _ = tx.send(121).await;
        }
    }
    let watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>> =
        Arc::new(tokio::sync::Mutex::new(watchers));
    loop {
        let run_hand = running_handler(proc.clone(), tx.clone(), watchers.clone());
        tokio::select! {
            _ = run_hand => continue,
            msg = rx.recv() => {
                // None means all senders were dropped: stop instead of
                // panicking (was `.unwrap()` on the Option)
                let code = match msg {
                    Some(code) => code,
                    None => return,
                };
                if process_protocol_symbol(proc.clone(), code).await.is_err() {
                    return;
                }
            },
        }
        tokio::task::yield_now().await;
    }
}
/// # Fn `process_protocol_symbol`
/// ## dispatches a single protocol code received over the managing channel
///
/// *input* : `Arc<TrackingProcess>`, `val: u8` — protocol code (meanings per branch below)
///
/// *output* : `Ok(())` to keep the daemon loop running | `Err(CustomError::Fatal)` to kill the thread
///
/// *initiator* : fn `run_daemons`
///
/// *depends on* : module `prcs`'s process-control functions
///
async fn process_protocol_symbol(proc: Arc<TrackingProcess>, val: u8) -> Result<(), CustomError>{
    match val {
        // 1 - File-dependency handling error -> terminating (after waiting)
        1 => {
            if is_active(&proc.name).await {
                error!("File-dependency handling error: Terminating {} process ..." , &proc.name);
                terminate_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
        },
        // 2 - File-dependency handling error -> holding (after waiting)
        2 => {
            if !is_frozen(&proc.name).await {
                error!("File-dependency handling error: Freezing {} process ..." , &proc.name);
                freeze_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(100)).await;
            }
        },
        // 3 - Running process error: fatal, stops the daemon loop
        3 => {
            error!("Error due to starting {} process", &proc.name);
            return Err(CustomError::Fatal)
        },
        // 4 - Timeout of waiting service-dependency -> staying (after waiting)
        4 => {
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 5 - Timeout of waiting service-dependency -> terminating (after waiting)
        5 => {
            if is_active(&proc.name).await {
                error!("Timeout of waiting service-dependency: Terminating {} process ..." , &proc.name);
                terminate_process(&proc.name).await;
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
        },
        // 6 - Timeout of waiting service-dependency -> holding (after waiting)
        6 => {
            if !is_frozen(&proc.name).await {
                error!("Timeout of waiting service-dependency: Freezing {} process ..." , &proc.name);
                freeze_process(&proc.name).await;
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        },
        // 7 - File-dependency change -> terminating (after check); fatal
        7 => {
            error!("File-dependency warning (file changed). Terminating {} process...", &proc.name);
            terminate_process(&proc.name).await;
            tokio::time::sleep(Duration::from_millis(100)).await;
            return Err(CustomError::Fatal)
        },
        // 8 - File-dependency change -> restarting (after check)
        8 => {
            warn!("File-dependency warning (file changed). Restarting {} process...", &proc.name);
            let _ = restart_process(&proc.name, &proc.path).await;
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 9 - File-dependency change -> staying (after check)
        9 => {
            // no need to trash logs
            warn!("File-dependency warning (file changed). Ignoring event on {} process...", &proc.name);
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 10 / 11 - Process unfreeze call via file handler (10) or service handler (11)
        10 | 11 => {
            if is_frozen(&proc.name).await {
                warn!("Unfreezing process {} call...", &proc.name);
                unfreeze_process(&proc.name).await;
            }
            tokio::time::sleep(Duration::from_millis(100)).await;
        },
        // 101 - Impermissible trigger values in JSON; fatal
        101 => {
            error!("Impermissible trigger values in JSON in {}'s block. Killing thread...", &proc.name);
            if is_active(&proc.name).await {
                terminate_process(&proc.name).await;
            }
            return Err(CustomError::Fatal)
        },
        // 121 - Cannot create valid watcher for file dependency; fatal
        // todo : think about valid situation
        121 => {
            error!("Cannot create valid watcher for file dependency. Terminating {} process...", &proc.name);
            let _ = terminate_process(&proc.name).await;
            return Err(CustomError::Fatal)
        },
        // 111 - global thread termination with killing current child in a face
        // of a current process
        111 => {
            warn!("Terminating {}'s child processes...", &proc.name);
            match is_active(&proc.name).await {
                true => {
                    terminate_process(&proc.name).await;
                },
                false => {
                    log::info!("Process {} is already terminated!", proc.name);
                },
            }
        },
        // unknown codes are deliberately ignored
        _ => {},
    }
    Ok(())
}
// check process status daemon
/// # Fn `running_handler`
/// ## func to async exec subjobs of checking process, services and files states
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `Arc<tokio::sync::Mutex<Vec<Inotify>>>`
///
/// *output* : ()
///
/// *initiator* : fn `run_daemons`
///
/// *managing* : Arc to current process struct, Arc to Mutex to list of file watchers
///
/// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}`
///
pub async fn running_handler(
    prc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>>,
) {
    // services and files check (once, concurrently)
    let files_check = file_handler(
        &prc.name,
        &prc.dependencies.files,
        tx.clone(),
        watchers.clone(),
    );
    let services_check = service_handler(&prc.name, &prc.dependencies.services, tx.clone());
    let res = join!(files_check, services_check);
    let deps_ok = res.0.is_ok() && res.1.is_ok();
    // if inactive and dependencies satisfied -> start the process
    if !is_active(&prc.name).await && deps_ok {
        if start_process(&prc.name, &prc.path).await.is_err() {
            // code 3 = running process error; a closed channel means shutdown
            // is underway, so the failure is ignored instead of panicking
            let _ = tx.send(3).await;
            return;
        }
    }
    // if frozen and dependencies satisfied -> request unfreeze (code 10)
    else if is_frozen(&prc.name).await && deps_ok {
        let _ = tx.send(10).await;
        return;
    }
    tokio::task::yield_now().await;
}
// todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}'
/// # Fn `get_container_id`
/// ## for getting container id used in logs
///
/// Runs `GET_ID_CMD` (`hostname`); inside a container the hostname
/// defaults to the container id.
///
/// *input* : -
///
/// *output* : Some(String) with the trimmed container id | None - if the
/// command fails, exits non-zero, or prints only whitespace
///
/// *initiator* : fn `options::logger::setup_logger`
///
/// *managing* : -
///
/// *depends on* : -
///
pub fn get_container_id() -> Option<String> {
    match Command::new(GET_ID_CMD).output() {
        Ok(output) => {
            if !output.status.success() {
                return None;
            }
            // Trim the trailing newline that `hostname` emits so the id is
            // usable in log lines; this also makes the emptiness check
            // meaningful (raw stdout always contained at least "\n").
            let id = String::from_utf8_lossy(&output.stdout).trim().to_string();
            if id.is_empty() {
                return None;
            }
            // Reuse the already-built string instead of converting stdout a
            // second time (the original rebuilt it and discarded `id`).
            Some(id)
        }
        Err(_) => None,
    }
}
#[cfg(test)]
mod utils_unittests {
    use super::get_container_id;
    /// Grabbing the container id relies on the `hostname` command being
    /// available in the test environment.
    #[test]
    fn check_if_container_id_can_be_grabed() {
        let id = get_container_id();
        assert!(matches!(id, Some(_)));
    }
}

View File

@ -1,5 +1,5 @@
use crate::options::structs::{CustomError, Files}; use crate::options::structs::{CustomError, Files};
use crate::utils::prcs::{is_active, is_frozen}; use super::prcs::{is_active, is_frozen};
use inotify::{EventMask, Inotify, WatchMask}; use inotify::{EventMask, Inotify, WatchMask};
use std::borrow::BorrowMut; use std::borrow::BorrowMut;
use std::path::Path; use std::path::Path;
@ -98,7 +98,10 @@ pub async fn file_handler(
// * watcher recreation after dealing with file recreation mechanism in text editors // * watcher recreation after dealing with file recreation mechanism in text editors
let mutex = notify.borrow_mut(); let mutex = notify.borrow_mut();
*mutex = create_watcher(&file.filename, &file.src).await.unwrap(); // *mutex = create_watcher(&file.filename, &file.src).await.unwrap();
if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
*mutex = watcher;
}
} }
match file.triggers.on_change.as_str() { match file.triggers.on_change.as_str() {
"stop" => { "stop" => {
@ -159,22 +162,22 @@ mod files_unittests {
use super::*; use super::*;
#[tokio::test] #[tokio::test]
async fn try_to_create_watcher() { async fn try_to_create_watcher() {
let res = create_watcher("dep-file", "/home/user/monitor/runner-rs/tests/examples/").await; let res = create_watcher("dep-file", "./tests/examples/").await;
assert!(res.is_ok()); assert!(res.is_ok());
} }
#[tokio::test] #[tokio::test]
async fn try_to_create_invalid_watcher() { async fn try_to_create_invalid_watcher() {
let res = create_watcher("invalid-file", "/path/to/the/hell").await; let res = create_watcher("invalid-file", "/path/to/the/no/dir").await;
assert!(res.is_err()); assert!(res.is_err());
} }
#[tokio::test] #[tokio::test]
async fn check_existing_file() { async fn check_existing_file() {
let res = check_file("dep-file", "/home/user/monitor/runner-rs/tests/examples/").await; let res = check_file("dep-file", "./tests/examples/").await;
assert!(res.is_ok()); assert!(res.is_ok());
} }
#[tokio::test] #[tokio::test]
async fn check_non_existing_file() { async fn check_non_existing_file() {
let res = check_file("invalid-file", "/path/to/the/hell").await; let res = check_file("invalid-file", "/path/to/the/no/dir").await;
assert!(res.is_err()); assert!(res.is_err());
} }
} }

View File

@ -1,5 +1,11 @@
//
// module needed to check host-agent health condition and to communicate with it // module needed to check host-agent health condition and to communicate with it
//
use tokio::{io::Interest, net::UnixStream}; use tokio::{io::Interest, net::UnixStream};
use anyhow::{Ok, Result, Error};
// to kill lint bug
#[allow(unused_imports)]
use tokio::net::UnixListener;
/// # Fn `open_unix_socket` /// # Fn `open_unix_socket`
/// ## opening unix-socket for host-agent communication /// ## opening unix-socket for host-agent communication
@ -14,9 +20,10 @@ use tokio::{io::Interest, net::UnixStream};
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
async fn open_unix_socket() -> Result<UnixStream, std::io::Error> { #[allow(dead_code)]
let socket = UnixStream::connect("/var/run/enode/hostagent.sock").await?; async fn open_unix_socket(sock_path: &str) -> Result<UnixStream, std::io::Error> {
Ok(socket) // "/var/run/enode/hostagent.sock"
UnixStream::connect(sock_path).await
} }
/// # Fn `ha_healthcheck` /// # Fn `ha_healthcheck`
@ -32,15 +39,11 @@ async fn open_unix_socket() -> Result<UnixStream, std::io::Error> {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
async fn ha_healthcheck(socket: &UnixStream) -> Result<(), std::io::Error >{ #[allow(dead_code)]
async fn ha_healthcheck(socket: &UnixStream) -> Result<(), Error> {
socket.ready(Interest::WRITABLE).await?; socket.ready(Interest::WRITABLE).await?;
if socket.writable().await.is_ok() { socket.writable().await?;
if let Err(er) = socket.try_write(b"Hello HAgent") { socket.try_write(b"Hello HAgent")?;
return Err(er);
}
} else {
return Err(std::io::ErrorKind::WouldBlock.into());
}
Ok(()) Ok(())
} }
@ -57,34 +60,37 @@ async fn ha_healthcheck(socket: &UnixStream) -> Result<(), std::io::Error >{
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), std::io::Error > { #[allow(dead_code)]
async fn ha_send_data(socket: &UnixStream, data: &str) -> Result<(), Error > {
socket.ready(Interest::WRITABLE).await?; socket.ready(Interest::WRITABLE).await?;
if socket.writable().await.is_ok() { socket.writable().await?;
if let Err(er) = socket.try_write(data.as_bytes()) { socket.try_write(data.as_bytes())?;
return Err(er);
}
} else {
return Err(std::io::ErrorKind::WouldBlock.into());
}
Ok(()) Ok(())
} }
#[cfg(test)] #[cfg(test)]
mod hagent_unittets { mod hagent_unittets {
use super::*; use super::*;
#[tokio::test] const TEST_SOCKET: &str = "./tests/examples/hagent_test.sock";
// maybe bool : true -> alive, false -> dead
// simple request on api async fn init_listener() -> UnixListener {
async fn hagent_healthcheck() { let _ = std::fs::remove_file(TEST_SOCKET);
let sock = open_unix_socket().await; UnixListener::bind(TEST_SOCKET).unwrap()
assert!(sock.is_ok());
let sock = sock.unwrap();
assert!(ha_healthcheck(&sock).await.is_ok());
} }
// #[tokio::test]
// // maybe bool : true -> alive, false -> dead
// // simple request on api
// async fn hagent_healthcheck() {
// let _ = init_listener().await;
// let sock = open_unix_socket(TEST_SOCKET).await;
// assert!(sock.is_ok());
// let sock = sock.unwrap();
// assert!(ha_healthcheck(&sock).await.is_ok());
// }
#[tokio::test] #[tokio::test]
// --Result<maybe Response> // --Result<maybe Response>
// one-shot func // one-shot func
async fn send_metrics_to_hagent() { async fn hagent_communication_test() {
use crate::options::structs::{ProcessMetrics, ContainerMetrics, Metrics}; use crate::options::structs::{ProcessMetrics, ContainerMetrics, Metrics};
let procm = ProcessMetrics::new("test-prc", 15.0, 5.0); let procm = ProcessMetrics::new("test-prc", 15.0, 5.0);
@ -92,7 +98,9 @@ mod hagent_unittets {
let metrics = Metrics::new(contm, vec![procm]); let metrics = Metrics::new(contm, vec![procm]);
let metrics = &serde_json::to_string_pretty(&metrics).unwrap(); let metrics = &serde_json::to_string_pretty(&metrics).unwrap();
let sock = open_unix_socket().await; #[allow(unused_mut)]
let mut _list = init_listener().await;
let sock = open_unix_socket(TEST_SOCKET).await;
assert!(sock.is_ok()); assert!(sock.is_ok());
let sock = sock.unwrap(); let sock = sock.unwrap();
assert!(ha_healthcheck(&sock).await.is_ok()); assert!(ha_healthcheck(&sock).await.is_ok());
@ -101,6 +109,6 @@ mod hagent_unittets {
} }
#[tokio::test] #[tokio::test]
async fn open_unixsocket_test() { async fn open_unixsocket_test() {
assert!(open_unix_socket().await.is_ok()); assert!(open_unix_socket("non/valid/socket/file.sock").await.is_err());
} }
} }

View File

@ -7,7 +7,7 @@ use crate::options::structs::TrackingProcess;
use sysinfo::{Process, System}; use sysinfo::{Process, System};
use tokio::join; use tokio::join;
use crate::options::structs::{ProcessMetrics, ContainerMetrics}; use crate::options::structs::{ProcessMetrics, ContainerMetrics};
use crate::utils::get_container_id; use super::get_container_id;
// use pcap::{Device, Capture, Active}; // use pcap::{Device, Capture, Active};
// use std::net::Ipv4Addr; // use std::net::Ipv4Addr;
// use anyhow::{Result, Ok}; // use anyhow::{Result, Ok};
@ -27,6 +27,7 @@ use crate::utils::get_container_id;
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)]
pub async fn init_metrics_grubber() { pub async fn init_metrics_grubber() {
let mut system = System::new(); let mut system = System::new();
// let mut buffer: Vec<PacketInfo> = vec![]; // let mut buffer: Vec<PacketInfo> = vec![];
@ -39,6 +40,8 @@ pub async fn init_metrics_grubber() {
// let _ = capture_packets(shared_buf.clone()).await; // let _ = capture_packets(shared_buf.clone()).await;
} }
#[allow(dead_code)]
#[allow(unused_variables)]
async fn gather_metrics(proc: Arc<Process>) { async fn gather_metrics(proc: Arc<Process>) {
} }
@ -92,6 +95,7 @@ async fn gather_metrics(proc: Arc<Process>) {
/// ///
/// *depends on* : `TrackingProcess` /// *depends on* : `TrackingProcess`
/// ///
#[allow(dead_code)]
async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProcess>>) -> ContainerMetrics { async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProcess>>) -> ContainerMetrics {
let metrics = join!( let metrics = join!(
get_cpu_metrics_container(sys.clone()), get_cpu_metrics_container(sys.clone()),
@ -119,6 +123,7 @@ async fn get_all_container_metrics(sys: Arc<System>, prcs: Arc<Vec<TrackingProce
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)]
async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 { async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 {
sys.global_cpu_usage() sys.global_cpu_usage()
} }
@ -136,6 +141,7 @@ async fn get_cpu_metrics_container(sys: Arc<System>) -> f32 {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)]
async fn get_ram_metrics_container(sys: Arc<System>) -> f32 { async fn get_ram_metrics_container(sys: Arc<System>) -> f32 {
(sys.used_memory() / sys.total_memory()) as f32 * 100.0 (sys.used_memory() / sys.total_memory()) as f32 * 100.0
} }
@ -156,6 +162,7 @@ async fn get_ram_metrics_container(sys: Arc<System>) -> f32 {
/// ///
/// *depends on* : `TrackingProcess` /// *depends on* : `TrackingProcess`
/// ///
#[allow(dead_code)]
async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> { async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> {
prcs.iter().map(|process| process.name.clone()).collect() prcs.iter().map(|process| process.name.clone()).collect()
} }
@ -173,6 +180,7 @@ async fn get_subsystems(prcs: Arc<Vec<TrackingProcess>>) -> Vec<String> {
/// ///
/// *depends on* : - /// *depends on* : -
/// ///
#[allow(dead_code)]
async fn get_all_metrics_process(proc: Arc<Process>, sys: Arc<System>) -> ProcessMetrics { async fn get_all_metrics_process(proc: Arc<Process>, sys: Arc<System>) -> ProcessMetrics {
let metrics = join!( let metrics = join!(
get_cpu_metrics_process(proc.clone()), get_cpu_metrics_process(proc.clone()),

View File

@ -233,14 +233,15 @@ mod process_unittests {
// rewrite, its a pipe // rewrite, its a pipe
#[tokio::test] #[tokio::test]
async fn full_cycle_with_restart() { async fn full_cycle_with_restart() {
let res1 = start_process("temp-process", "/home/user/monitor/runner-rs/temp-process").await; // let _ = std::io::stdout().write_all(b"");
let res1 = start_process("restart-prc", "./tests/examples/restart-prc").await;
assert!(res1.is_ok()); assert!(res1.is_ok());
let res2 = let res2 =
restart_process("temp-process", "/home/user/monitor/runner-rs/temp-process").await; restart_process("restart-prc", "./tests/examples/restart-prc").await;
assert!(res2.is_ok()); assert!(res2.is_ok());
let _ = terminate_process("temp-process").await; let _ = terminate_process("restart-prc").await;
let res3 = is_active("temp-process").await; let res3 = is_active("restart-prc").await;
assert!(res3); assert!(!res3);
} }
// rewrite, its a pipe // rewrite, its a pipe
#[tokio::test] #[tokio::test]
@ -249,7 +250,10 @@ mod process_unittests {
} }
#[tokio::test] #[tokio::test]
async fn is_active_check() { async fn is_active_check() {
assert!(is_active("systemd").await); let res1 = start_process("tmp-prc", "./tests/examples/tmp-prc").await;
assert!(res1.is_ok());
assert!(is_active("tmp-prc").await);
let _ = terminate_process("tmp-prc").await;
} }
#[tokio::test] #[tokio::test]
async fn isnt_active_check() { async fn isnt_active_check() {
@ -257,11 +261,17 @@ mod process_unittests {
} }
#[tokio::test] #[tokio::test]
async fn is_frozen_check() { async fn is_frozen_check() {
assert!(!is_frozen("systemd").await); let res1 = start_process("freeze-check", "./tests/examples/freeze-check").await;
assert!(res1.is_ok());
assert!(!is_frozen("freeze-check").await);
} }
#[tokio::test] #[tokio::test]
async fn pidof_active_process() { async fn pidof_active_process() {
assert!(get_pid("systemd").await.is_some()); assert!(get_pid("pidof-prc").await.is_none());
let res1 = start_process("pidof-prc", "./tests/examples/pidof-prc").await;
assert!(res1.is_ok());
assert!(get_pid("pidof-prc").await.is_some());
let _ = terminate_process("pidof-prc").await;
} }
// broken mechanism need to check // broken mechanism need to check

View File

@ -1,5 +1,5 @@
use crate::options::structs::{CustomError, Services}; use crate::options::structs::{CustomError, Services};
use crate::utils::prcs::{is_active, is_frozen}; use super::prcs::{is_active, is_frozen};
use log::{error, warn}; use log::{error, warn};
use std::net::{TcpStream, ToSocketAddrs}; use std::net::{TcpStream, ToSocketAddrs};
use std::sync::Arc; use std::sync::Arc;

BIN
noxis-rs/temp-process Executable file

Binary file not shown.

Binary file not shown.

BIN
noxis-rs/tests/examples/pidof-prc Executable file

Binary file not shown.

Binary file not shown.

BIN
noxis-rs/tests/examples/tmp-prc Executable file

Binary file not shown.

View File

@ -1,265 +0,0 @@
pub mod files;
pub mod hagent;
pub mod metrics;
pub mod prcs;
pub mod services;
//
use crate::options::structs::TrackingProcess;
use files::create_watcher;
use files::file_handler;
use inotify::Inotify;
use log::{error, warn};
use prcs::{
freeze_process, is_active, is_frozen, restart_process, start_process, terminate_process,
unfreeze_process,
};
use services::service_handler;
use std::process::Command;
use std::sync::Arc;
use tokio::join;
use tokio::sync::mpsc;
use tokio::time::Duration;
const GET_ID_CMD: &str = "hostname";
/// # Fn `run_daemons`
/// ## async func to run 3 main daemons: process, service and file monitors and manage process state according to given messages into channel
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `&mut mpsc::Receiver<u8>`,
///
/// *output* : ()
///
/// *initiator* : main thread
///
/// *managing* : Arc to current process struct, Arc to managing channel writer, mut ref to managing channel reader
///
/// *depends on* : all module `prcs`'s functions, fn `running_handler`, fn `utils::files::create_watcher`
///
/// > *hint* : give mpsc with capacity 1 to jump over potential errors during running process
///
/// Message-code legend (sent by the handlers, dispatched below):
/// 1/2 file-dependency errors, 3 start failure, 4-6 service-dependency
/// timeouts, 7-9 file-change triggers, 10/11 unfreeze requests,
/// 101 bad JSON trigger values, 121 watcher creation failure,
/// 111 global shutdown.
pub async fn run_daemons(
    proc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    rx: &mut mpsc::Receiver<u8>,
) {
    // creating watchers + ---buffers---
    // One inotify watcher per declared file dependency; a failed watcher is
    // reported as code 121 instead of aborting setup.
    let mut watchers: Vec<Inotify> = vec![];
    for file in proc.dependencies.files.clone().into_iter() {
        if let Ok(watcher) = create_watcher(&file.filename, &file.src).await {
            watchers.push(watcher);
        } else {
            let _ = tx.send(121).await;
        }
        // watchers.push(create_watcher(&file.filename, &file.src).await.unwrap());
    }
    let watchers_clone: Arc<tokio::sync::Mutex<Vec<Inotify>>> =
        Arc::new(tokio::sync::Mutex::new(watchers));
    loop {
        let run_hand = running_handler(proc.clone(), tx.clone(), watchers_clone.clone());
        // select! drops (cancels) the unfinished running_handler future as
        // soon as a control message arrives on `rx`.
        tokio::select! {
            _ = run_hand => {},
            _val = rx.recv() => {
                // NOTE(review): rx.recv() yields None once every sender is
                // dropped; this unwrap would panic then — confirm senders
                // outlive this loop.
                match _val.unwrap() {
                    // 1 - File-dependency handling error -> terminating (after waiting)
                    1 => {
                        if is_active(&proc.name).await {
                            error!("File-dependency handling error: Terminating {} process ..." , &proc.name);
                            terminate_process(&proc.name).await;
                            tokio::time::sleep(Duration::from_millis(100)).await;
                        }
                        return;
                    },
                    // 2 - File-dependency handling error -> holding (after waiting)
                    2 => {
                        if !is_frozen(&proc.name).await {
                            error!("File-dependency handling error: Freezing {} process ..." , &proc.name);
                            freeze_process(&proc.name).await;
                            tokio::time::sleep(Duration::from_millis(100)).await;
                        }
                    },
                    // 3 - Running process error
                    3 => {
                        error!("Error due to starting {} process", &proc.name);
                        break;
                    },
                    // 4 - Timeout of waiting service-dependency -> staying (after waiting)
                    4 => {
                        // warn!("Timeout of waiting service-dependency: Ignoring on {} process ..." , &proc.name);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    },
                    // 5 - Timeout of waiting service-dependency -> terminating (after waiting)
                    5 => {
                        if is_active(&proc.name).await {
                            error!("Timeout of waiting service-dependency: Terminating {} process ..." , &proc.name);
                            terminate_process(&proc.name).await;
                            tokio::time::sleep(Duration::from_millis(1000)).await;
                        }
                    },
                    // 6 - Timeout of waiting service-dependency -> holding (after waiting)
                    6 => {
                        // println!("holding {}-{}", proc.name, is_active(&proc.name).await);
                        if !is_frozen(&proc.name).await {
                            error!("Timeout of waiting service-dependency: Freezing {} process ..." , &proc.name);
                            freeze_process(&proc.name).await;
                            tokio::time::sleep(Duration::from_secs(1)).await;
                        }
                    },
                    // // 7 - File-dependency change -> terminating (after check)
                    // NOTE(review): this arm `return`s while 101/111/121
                    // `break` — presumably intentional (skip post-loop yield),
                    // but worth confirming.
                    7 => {
                        error!("File-dependency warning (file changed). Terminating {} process...", &proc.name);
                        terminate_process(&proc.name).await;
                        tokio::time::sleep(Duration::from_millis(100)).await;
                        return;
                    },
                    // // 8 - File-dependency change -> restarting (after check)
                    8 => {
                        warn!("File-dependency warning (file changed). Restarting {} process...", &proc.name);
                        let _ = restart_process(&proc.name, &proc.path).await;
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    },
                    // // 9 - File-dependency change -> staying (after check)
                    9 => {
                        // no need to trash logs
                        warn!("File-dependency warning (file changed). Ignoring event on {} process...", &proc.name);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    },
                    // 10 - Process unfreaze call via file handler (or service handler)
                    10 | 11 => {
                        if is_frozen(&proc.name).await {
                            warn!("Unfreezing process {} call...", &proc.name);
                            unfreeze_process(&proc.name).await;
                        }
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    },
                    // 11 - Process unfreaze call via service handler
                    // 11 => {
                    //     if is_frozen(&proc.name).await {
                    //         warn!("Unfreezing process {} call...", &proc.name);
                    //         unfreeze_process(&proc.name).await;
                    //     }
                    //     tokio::time::sleep(Duration::from_millis(100)).await;
                    // },
                    // 101 - Impermissible trigger values in JSON
                    101 => {
                        error!("Impermissible trigger values in JSON in {}'s block. Killing thread...", proc.name);
                        if is_active(&proc.name).await {
                            terminate_process(&proc.name).await;
                        }
                        break;
                    },
                    //
                    // 121 - Cannot create valid watcher for file dependency
                    121 => {
                        error!("Cannot create valid watcher for {}'s file dependency. Terminating thread...", proc.name);
                        let _ = terminate_process("runner-rs").await;
                        break;
                    },
                    // 111 - global thread termination with killing current child in a face
                    // of a current process
                    111 => {
                        warn!("Terminating {}'s child processes...", &proc.name);
                        match is_active(&proc.name).await {
                            true => {
                                terminate_process(&proc.name).await;
                            },
                            false => {
                                log::info!("Process {} is already terminated!", proc.name);
                            },
                        }
                        break;
                    },
                    _ => {},
                }
            },
        }
        tokio::task::yield_now().await;
    }
    tokio::task::yield_now().await;
}
// check process status daemon
/// # Fn `running_handler`
/// ## one pass of the per-process state machine: check file/service deps, then start or unfreeze the process as needed
///
/// *input* : `Arc<TrackingProcess>`, `Arc<mpsc::Sender<u8>>`, `Arc<tokio::sync::Mutex<Vec<Inotify>>>`
///
/// *output* : ()
///
/// *initiator* : fn `run_daemons`
///
/// *managing* : Arc to current process struct, Arc to Mutex to list of file watchers
///
/// *depends on* : fn `utils::files::file_handler`, fn `utils::services::service_handler`, fn `utils::prcs::{is_active, is_frozen, start_process}`
///
pub async fn running_handler(
    prc: Arc<TrackingProcess>,
    tx: Arc<mpsc::Sender<u8>>,
    watchers: Arc<tokio::sync::Mutex<Vec<Inotify>>>,
) {
    // Run both dependency checks concurrently (once per pass); each reports
    // problems through `tx` on its own.
    let (files_res, services_res) = join!(
        file_handler(
            &prc.name,
            &prc.dependencies.files,
            tx.clone(),
            watchers.clone(),
        ),
        service_handler(&prc.name, &prc.dependencies.services, tx.clone())
    );
    let deps_healthy = files_res.is_ok() && services_res.is_ok();
    // Process is down while every dependency is fine -> (re)start it.
    if !is_active(&prc.name).await && deps_healthy {
        if start_process(&prc.name, &prc.path).await.is_err() {
            // 3 = "failed to start process" for the run_daemons dispatcher.
            tx.send(3).await.unwrap();
            return;
        }
    }
    // Process is frozen while dependencies recovered -> request an unfreeze.
    else if is_frozen(&prc.name).await && deps_healthy {
        // 10 = "unfreeze request" for the run_daemons dispatcher.
        tx.send(10).await.unwrap();
        return;
    }
    // Yield so sibling tasks can make progress between passes.
    tokio::task::yield_now().await;
}
// todo: cmd across cat /proc/self/mountinfo | grep "/docker/containers/" | head -1 | awk -F '/' '{print $5}'
/// # Fn `get_container_id`
/// ## for getting container id used in logs
///
/// Runs `GET_ID_CMD` (`hostname`); inside a container the hostname
/// defaults to the container id.
///
/// *input* : -
///
/// *output* : Some(String) with the trimmed container id | None - if the
/// command fails, exits non-zero, or prints only whitespace
///
/// *initiator* : fn `options::logger::setup_logger`
///
/// *managing* : -
///
/// *depends on* : -
///
pub fn get_container_id() -> Option<String> {
    match Command::new(GET_ID_CMD).output() {
        Ok(output) => {
            if !output.status.success() {
                return None;
            }
            // Trim the trailing newline that `hostname` emits so the id is
            // usable in log lines; this also makes the emptiness check
            // meaningful (raw stdout always contained at least "\n").
            let id = String::from_utf8_lossy(&output.stdout).trim().to_string();
            if id.is_empty() {
                return None;
            }
            // Reuse the already-built string instead of converting stdout a
            // second time (the original rebuilt it and discarded `id`).
            Some(id)
        }
        Err(_) => None,
    }
}
#[cfg(test)]
mod utils_unittests {
    use super::get_container_id;
    /// Grabbing the container id relies on the `hostname` command being
    /// available in the test environment.
    #[test]
    fn check_if_container_id_can_be_grabed() {
        let id = get_container_id();
        assert!(matches!(id, Some(_)));
    }
}

Binary file not shown.