fix: re-enable tests and fix legacy test interface (#208)

* fix: re-enable tests and fix legacy test interface

* fix: skip llamacpp based on installed status

* fix: minor fix
This commit is contained in:
Tuan Anh Nguyen Dang (Tadashi_Cin)
2024-09-04 12:37:39 +07:00
committed by GitHub
parent 92f6b8e1bf
commit 76f2652d2a
15 changed files with 177 additions and 120 deletions

View File

@@ -7,7 +7,7 @@ from index import ReaderIndexingPipeline
 from openai.resources.embeddings import Embeddings
 from openai.types.chat.chat_completion import ChatCompletion
-from kotaemon.llms import LCAzureChatOpenAI
+from kotaemon.llms import AzureChatOpenAI

 with open(Path(__file__).parent / "resources" / "embedding_openai.json") as f:
     openai_embedding = json.load(f)
@@ -61,12 +61,11 @@ def test_ingest_pipeline(patch, mock_openai_embedding, tmp_path):
     assert len(results) == 1

     # create llm
-    llm = LCAzureChatOpenAI(
-        openai_api_base="https://test.openai.azure.com/",
-        openai_api_key="some-key",
-        openai_api_version="2023-03-15-preview",
-        deployment_name="gpt35turbo",
-        temperature=0,
+    llm = AzureChatOpenAI(
+        api_key="dummy",
+        api_version="2024-05-01-preview",
+        azure_deployment="gpt-4o",
+        azure_endpoint="https://test.openai.azure.com/",
     )
     qa_pipeline = indexing_pipeline.to_qa_pipeline(llm=llm, openai_api_key="some-key")
     response = qa_pipeline("Summarize this document.")