fix: clean chatUI and simplify regen logic (#452) bump:patch

This commit is contained in:
Tuan Anh Nguyen Dang (Tadashi_Cin) 2024-11-02 17:19:10 +07:00 committed by GitHub
parent fb241a467c
commit bd2490bef1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 31 additions and 151 deletions

View File

@@ -63,7 +63,9 @@ os.environ["HF_HUB_CACHE"] = str(KH_APP_DATA_DIR / "huggingface")
KH_DOC_DIR = this_dir / "docs" KH_DOC_DIR = this_dir / "docs"
KH_MODE = "dev" KH_MODE = "dev"
KH_FEATURE_CHAT_SUGGESTION = config("KH_FEATURE_CHAT_SUGGESTION", default=False) KH_FEATURE_CHAT_SUGGESTION = config(
"KH_FEATURE_CHAT_SUGGESTION", default=False, cast=bool
)
KH_FEATURE_USER_MANAGEMENT = config( KH_FEATURE_USER_MANAGEMENT = config(
"KH_FEATURE_USER_MANAGEMENT", default=True, cast=bool "KH_FEATURE_USER_MANAGEMENT", default=True, cast=bool
) )

View File

@@ -163,6 +163,10 @@ mark {
right: 40px; right: 40px;
} }
.upload-button {
display: none;
}
.scrollable { .scrollable {
overflow-y: auto; overflow-y: auto;
} }

View File

@@ -192,7 +192,6 @@ class ChatPage(BasePage):
gr.on( gr.on(
triggers=[ triggers=[
self.chat_panel.text_input.submit, self.chat_panel.text_input.submit,
self.chat_panel.submit_btn.click,
], ],
fn=self.submit_msg, fn=self.submit_msg,
inputs=[ inputs=[
@ -312,105 +311,6 @@ class ChatPage(BasePage):
concurrency_limit=20, concurrency_limit=20,
) )
regen_event = (
self.chat_panel.regen_btn.click(
fn=self.regen_fn,
inputs=[
self.chat_control.conversation_id,
self.chat_panel.chatbot,
self._app.settings_state,
self._reasoning_type,
self._llm_type,
self.state_chat,
self._app.user_id,
]
+ self._indices_input,
outputs=[
self.chat_panel.chatbot,
self.info_panel,
self.plot_panel,
self.state_plot_panel,
self.state_chat,
],
concurrency_limit=20,
show_progress="minimal",
)
.then(
fn=lambda: True,
inputs=None,
outputs=[self._preview_links],
js=pdfview_js,
)
.success(
fn=self.check_and_suggest_name_conv,
inputs=self.chat_panel.chatbot,
outputs=[
self.chat_control.conversation_rn,
self._conversation_renamed,
],
)
.success(
self.chat_control.rename_conv,
inputs=[
self.chat_control.conversation_id,
self.chat_control.conversation_rn,
self._conversation_renamed,
self._app.user_id,
],
outputs=[
self.chat_control.conversation,
self.chat_control.conversation,
self.chat_control.conversation_rn,
],
show_progress="hidden",
)
)
# chat suggestion toggle
if getattr(flowsettings, "KH_FEATURE_CHAT_SUGGESTION", False):
regen_event = regen_event.success(
fn=self.suggest_chat_conv,
inputs=[
self._app.settings_state,
self.chat_panel.chatbot,
],
outputs=[
self.state_follow_up,
self._suggestion_updated,
],
show_progress="hidden",
).success(
self.chat_control.persist_chat_suggestions,
inputs=[
self.chat_control.conversation_id,
self.state_follow_up,
self._suggestion_updated,
self._app.user_id,
],
show_progress="hidden",
)
# final data persist
regen_event = regen_event.then(
fn=self.persist_data_source,
inputs=[
self.chat_control.conversation_id,
self._app.user_id,
self.info_panel,
self.state_plot_panel,
self.state_retrieval_history,
self.state_plot_history,
self.chat_panel.chatbot,
self.state_chat,
]
+ self._indices_input,
outputs=[
self.state_retrieval_history,
self.state_plot_history,
],
concurrency_limit=20,
)
self.chat_control.btn_info_expand.click( self.chat_control.btn_info_expand.click(
fn=lambda is_expanded: ( fn=lambda is_expanded: (
gr.update(scale=INFO_PANEL_SCALES[is_expanded]), gr.update(scale=INFO_PANEL_SCALES[is_expanded]),
@ -616,6 +516,15 @@ class ChatPage(BasePage):
if not chat_input: if not chat_input:
raise ValueError("Input is empty") raise ValueError("Input is empty")
chat_input_text = chat_input.get("text", "")
# check if regen mode is active
if chat_input_text:
chat_history = chat_history + [(chat_input_text, None)]
else:
if not chat_history:
raise gr.Error("Empty chat")
if not conv_id: if not conv_id:
id_, update = self.chat_control.new_conv(user_id) id_, update = self.chat_control.new_conv(user_id)
with Session(engine) as session: with Session(engine) as session:
@ -637,8 +546,8 @@ class ChatPage(BasePage):
new_chat_suggestion = chat_suggest new_chat_suggestion = chat_suggest
return ( return (
"", {},
chat_history + [(chat_input, None)], chat_history,
new_conv_id, new_conv_id,
conv_update, conv_update,
new_conv_name, new_conv_name,
@ -871,9 +780,13 @@ class ChatPage(BasePage):
*selecteds, *selecteds,
): ):
"""Chat function""" """Chat function"""
chat_input = chat_history[-1][0] chat_input, chat_output = chat_history[-1]
chat_history = chat_history[:-1] chat_history = chat_history[:-1]
# if chat_input is empty, assume regen mode
if chat_output:
state["app"]["regen"] = True
queue: asyncio.Queue[Optional[dict]] = asyncio.Queue() queue: asyncio.Queue[Optional[dict]] = asyncio.Queue()
# construct the pipeline # construct the pipeline
@ -921,6 +834,7 @@ class ChatPage(BasePage):
plot_gr = self._json_to_plot(plot) plot_gr = self._json_to_plot(plot)
state[pipeline.get_info()["id"]] = reasoning_state["pipeline"] state[pipeline.get_info()["id"]] = reasoning_state["pipeline"]
yield ( yield (
chat_history + [(chat_input, text or msg_placeholder)], chat_history + [(chat_input, text or msg_placeholder)],
refs, refs,
@ -942,35 +856,6 @@ class ChatPage(BasePage):
state, state,
) )
def regen_fn(
self,
conversation_id,
chat_history,
settings,
reasoning_type,
llm_type,
state,
user_id,
*selecteds,
):
"""Regen function"""
if not chat_history:
gr.Warning("Empty chat")
yield chat_history, "", state
return
state["app"]["regen"] = True
yield from self.chat_fn(
conversation_id,
chat_history,
settings,
reasoning_type,
llm_type,
state,
user_id,
*selecteds,
)
def check_and_suggest_name_conv(self, chat_history): def check_and_suggest_name_conv(self, chat_history):
suggest_pipeline = SuggestConvNamePipeline() suggest_pipeline = SuggestConvNamePipeline()
new_name = gr.update() new_name = gr.update()

View File

@@ -21,24 +21,13 @@ class ChatPanel(BasePage):
bubble_full_width=False, bubble_full_width=False,
) )
with gr.Row(): with gr.Row():
self.text_input = gr.Text( self.text_input = gr.MultimodalTextbox(
interactive=True,
scale=20,
file_count="multiple",
placeholder="Chat input", placeholder="Chat input",
scale=15,
container=False, container=False,
max_lines=10, show_label=False,
)
self.submit_btn = gr.Button(
value="Send",
scale=1,
min_width=10,
variant="primary",
elem_classes=["cap-button-height"],
)
self.regen_btn = gr.Button(
value="Regen",
scale=1,
min_width=10,
elem_classes=["cap-button-height"],
) )
def submit_msg(self, chat_input, chat_history): def submit_msg(self, chat_input, chat_history):

View File

@@ -14,7 +14,7 @@ class ChatSuggestion(BasePage):
with gr.Accordion(label="Chat Suggestion", open=False) as self.accordion: with gr.Accordion(label="Chat Suggestion", open=False) as self.accordion:
self.example = gr.DataFrame( self.example = gr.DataFrame(
value=chat_samples, value=chat_samples,
headers=["Sample"], headers=["Next Question"],
interactive=False, interactive=False,
wrap=True, wrap=True,
) )
@ -23,4 +23,4 @@ class ChatSuggestion(BasePage):
return self.example return self.example
def select_example(self, ev: gr.SelectData): def select_example(self, ev: gr.SelectData):
return ev.value return {"text": ev.value}