# `ML_API` module's `.env` EXAMPLE

# `ML_TARGET_URL` — URL and endpoint used to integrate with the model's API
# -------------------------------------------------------------------
# e.g. when using `ollama`, set it to:
#   http://{BASE_URL}:11434/api/generate
# -------------------------------------------------------------------
ML_TARGET_URL="http://url.to/ml/api"

# `ML_MODEL_NAME` — name of the model to use
ML_MODEL_NAME="kis-test"

# `ML_REQUEST_TIMEOUT` — how long to wait for an ML response
# ------------------------------------------------------
# - Measured in seconds
# - Default value - 10
# ------------------------------------------------------
ML_REQUEST_TIMEOUT="10"

# `ML_API_LOG_LEVEL` — log level selection
# --------------------------------------
# - Default value - `INFO`
# - Available options:
#   1) TRACE - full log info
#   2) DEBUG
#   3) INFO  - common log info
#   4) WARN
#   5) ERROR
#   6) OFF   - logging disabled
# --------------------------------------
ML_API_LOG_LEVEL="INFO"

# `ML_API_PORT` — virtual port number to listen on
# ---------------------------------------------------
# - Default value - 5134
#   (NOTE(review): an earlier comment said 5143 while the value was 5134 —
#    confirm which port is intended)
# ---------------------------------------------------
ML_API_PORT="5134"