diff --git a/scripts/serve_local.py b/scripts/serve_local.py
index 61b8f77..f1f4831 100644
--- a/scripts/serve_local.py
+++ b/scripts/serve_local.py
@@ -3,9 +3,7 @@
 import subprocess
 from inspect import currentframe, getframeinfo
 from pathlib import Path
 
-import dotenv
-
-configs = dotenv.dotenv_values(".env")
+from decouple import config
 
 system_name = platform.system()
@@ -53,7 +51,7 @@ def serve_llamacpp_python(local_model_file: Path, **kwargs):
 
 
 def main():
-    local_model_file = configs.get("LOCAL_MODEL", "")
+    local_model_file = config("LOCAL_MODEL", default="")
 
     if not local_model_file:
         print("LOCAL_MODEL not set in the `.env` file.")
diff --git a/scripts/server_llamacpp_linux.sh b/scripts/server_llamacpp_linux.sh
index f72ccde..a45e670 100755
--- a/scripts/server_llamacpp_linux.sh
+++ b/scripts/server_llamacpp_linux.sh
@@ -87,7 +87,7 @@ activate_environment
 
 # install dependencies
 # ver 0.2.56 produces segment error for /embeddings on MacOS
-python -m pip install llama-cpp-python[server]!=0.2.56
+python -m pip install llama-cpp-python[server]==0.2.55
 
 # start the server with passed params
 python -m llama_cpp.server $@
diff --git a/scripts/server_llamacpp_macos.sh b/scripts/server_llamacpp_macos.sh
index 4ed9ac2..13d0784 100755
--- a/scripts/server_llamacpp_macos.sh
+++ b/scripts/server_llamacpp_macos.sh
@@ -88,7 +88,7 @@ activate_environment
 
 # install dependencies
 # ver 0.2.56 produces segment error for /embeddings on MacOS
-python -m pip install llama-cpp-python[server]!=0.2.56
+python -m pip install llama-cpp-python[server]==0.2.55
 
 # start the server with passed params
 python -m llama_cpp.server $@
diff --git a/scripts/server_llamacpp_windows.bat b/scripts/server_llamacpp_windows.bat
index 48779db..97c1292 100644
--- a/scripts/server_llamacpp_windows.bat
+++ b/scripts/server_llamacpp_windows.bat
@@ -28,7 +28,7 @@ call :activate_environment
 
 @rem install dependencies
 @rem ver 0.2.56 produces segment error for /embeddings on MacOS
-call python -m pip install llama-cpp-python[server]!=0.2.56
+call python -m pip install llama-cpp-python[server]==0.2.55
 @REM
 @rem start the server with passed params
 call python -m llama_cpp.server %*
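
For context on the `dotenv` → `decouple` switch in `serve_local.py`: `dotenv.dotenv_values(".env")` returns a plain dict, so missing keys go through `dict.get()`, while python-decouple's `config()` callable resolves a key against both the process environment and the `.env` file and takes its fallback as a `default=` keyword. A minimal sketch of the two call styles, assuming a `.env` file with an illustrative `LOCAL_MODEL` entry (the path below is hypothetical, not from the PR):

```python
# .env (illustrative entry, not from the PR):
# LOCAL_MODEL=/models/llama-2-7b.Q4_K_M.gguf

# Before: python-dotenv parses .env into a plain dict,
# so a missing key is handled with dict.get().
import dotenv

configs = dotenv.dotenv_values(".env")
local_model_file = configs.get("LOCAL_MODEL", "")

# After: python-decouple resolves the key and takes the
# fallback as a keyword argument.
from decouple import config

local_model_file = config("LOCAL_MODEL", default="")
```

One behavioral difference worth noting: decouple gives an already-exported `LOCAL_MODEL` environment variable precedence over the `.env` entry, whereas `dotenv_values()` only ever reads the file.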