This commit is contained in:
ian 2024-03-27 18:58:19 +07:00
parent da86fa463f
commit c6db7f5d01
4 changed files with 5 additions and 7 deletions

View File

@@ -3,9 +3,7 @@ import subprocess
from inspect import currentframe, getframeinfo
from pathlib import Path
import dotenv
configs = dotenv.dotenv_values(".env")
from decouple import config
system_name = platform.system()
@@ -53,7 +51,7 @@ def serve_llamacpp_python(local_model_file: Path, **kwargs):
def main():
local_model_file = configs.get("LOCAL_MODEL", "")
local_model_file = config("LOCAL_MODEL", default="")
if not local_model_file:
print("LOCAL_MODEL not set in the `.env` file.")

View File

@@ -87,7 +87,7 @@ activate_environment
# install dependencies
# ver 0.2.56 produces segment error for /embeddings on MacOS
python -m pip install llama-cpp-python[server]!=0.2.56
python -m pip install llama-cpp-python[server]==0.2.55
# start the server with passed params
python -m llama_cpp.server $@

View File

@@ -88,7 +88,7 @@ activate_environment
# install dependencies
# ver 0.2.56 produces segment error for /embeddings on MacOS
python -m pip install llama-cpp-python[server]!=0.2.56
python -m pip install llama-cpp-python[server]==0.2.55
# start the server with passed params
python -m llama_cpp.server $@

View File

@@ -28,7 +28,7 @@ call :activate_environment
@rem install dependencies
@rem ver 0.2.56 produces segment error for /embeddings on MacOS
call python -m pip install llama-cpp-python[server]!=0.2.56
call python -m pip install llama-cpp-python[server]==0.2.55
@REM @rem start the server with passed params
call python -m llama_cpp.server %*