improve llm selection for simple reasoning pipeline
@@ -17,8 +17,7 @@ if machine == "x86_64":
 BINARY_REMOTE_NAME = f"frpc_{platform.system().lower()}_{machine.lower()}"
 EXTENSION = ".exe" if os.name == "nt" else ""
 BINARY_URL = (
-    "some-endpoint.com"
-    f"/kotaemon/tunneling/{VERSION}/{BINARY_REMOTE_NAME}{EXTENSION}"
+    "some-endpoint.com" f"/kotaemon/tunneling/{VERSION}/{BINARY_REMOTE_NAME}{EXTENSION}"
 )
 
 BINARY_FILENAME = f"{BINARY_REMOTE_NAME}_v{VERSION}"
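The BINARY_URL change above is purely cosmetic: Python concatenates adjacent string literals, including f-strings, at compile time, so folding the two pieces onto one line yields the same URL. A minimal sketch of the equivalence, using illustrative stand-ins for the module's real VERSION and platform-derived names:

# Illustrative values only; the real module derives these from
# platform.system(), platform.machine(), and its VERSION constant.
VERSION = "0.2"
BINARY_REMOTE_NAME = "frpc_linux_x86_64"
EXTENSION = ""

two_line = (
    "some-endpoint.com"
    f"/kotaemon/tunneling/{VERSION}/{BINARY_REMOTE_NAME}{EXTENSION}"
)
one_line = (
    "some-endpoint.com" f"/kotaemon/tunneling/{VERSION}/{BINARY_REMOTE_NAME}{EXTENSION}"
)
# Adjacent string literals are joined at compile time, so both
# spellings produce the identical URL string.
assert one_line == two_line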
@@ -194,7 +194,6 @@ class ChatOpenAI(LCChatMixin, ChatLLM):  # type: ignore
 
 
 class AzureChatOpenAI(LCChatMixin, ChatLLM):  # type: ignore
-
     def __init__(
         self,
         azure_endpoint: str | None = None,
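For context, AzureChatOpenAI is the chat wrapper this hunk touches (the LCChatMixin base suggests a LangChain-backed implementation), and azure_endpoint is the only constructor parameter visible in the diff. A hypothetical construction sketch; the import path and placeholder endpoint are assumptions, not confirmed by this commit:

# Hypothetical usage sketch. Only `azure_endpoint` appears in the hunk;
# the import path and any further keyword arguments the class accepts
# are assumed for illustration.
from kotaemon.llms import AzureChatOpenAI

llm = AzureChatOpenAI(
    azure_endpoint="https://<your-resource>.openai.azure.com/",  # placeholder
)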