# NOTE(review): the original lines here ("Spaces:" / "Runtime error") were
# paste/extraction artifacts, not Python code — replaced with this comment
# so the module stays importable.
import datetime | |
import os | |
from prompts.llm import qa_prompt_template | |
from prompts.condense_llm import condense_template | |
from typing import Dict, Any | |
from config import HISTORY_DIR | |
def get_messages_last_content(data: Dict[str, Any], **_: Any) -> str:
    """Extract the most recent human turn from an LLM request payload.

    Reads the last element of ``data["messages"]``, takes its content, and
    strips any ``Human:`` / ``Assistant:`` transcript framing so only the
    latest human utterance remains.

    :param data: the user llm request data; must contain a non-empty
        ``messages`` list whose items are dicts with a ``"content"`` key or
        objects with a ``.content`` attribute
    :type data: Dict[str, Any]
    :return: the last human turn, whitespace-stripped

    Example:
        .. code-block:: python

            from gptcache.processor.pre import get_messages_last_content

            content = get_messages_last_content({"messages": [{"content": "hello"}, {"content": "world"}]})
            # "world"
    """
    last_message = data.get("messages")[-1]
    # Accept both dict-style messages ({"content": ...}, as shown in the
    # docstring example) and object-style messages (msg.content) — the
    # original only handled the attribute form, which raised AttributeError
    # on the documented dict input. Debug print() removed.
    if isinstance(last_message, dict):
        content = last_message["content"]
    else:
        content = last_message.content
    # Keep only the text after the last "Human:" marker and before any
    # following "Assistant:" marker.
    return content.split("Human:")[-1].split("Assistant:")[0].strip()
def transcribe(current_model, audio):
    """Convert *audio* to text by delegating to the active model's
    audio_response method."""
    response = current_model.audio_response(audio)
    return response
def history_file_path(username):
    """Build a fresh, timestamped JSON history path for *username*.

    Ensures the per-user directory under HISTORY_DIR exists, then returns
    HISTORY_DIR/<username>/<YYYY-mm-dd_HH-MM-SS>.json. The file itself is
    not created here.
    """
    user_dir = os.path.join(HISTORY_DIR, username)
    os.makedirs(user_dir, exist_ok=True)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    return os.path.join(user_dir, f"{timestamp}.json")
def load_lasted_file_username(username):
    """Return the path stem of *username*'s most recent history file.

    Scans HISTORY_DIR/<username> for files whose first 19 characters are a
    '%Y-%m-%d_%H-%M-%S' timestamp (the format written by history_file_path)
    and returns HISTORY_DIR/<username>/<newest-timestamp>.

    NOTE(review): the returned path deliberately omits the '.json' suffix,
    matching the original behavior — presumably callers append it; confirm.

    :return: the extension-less path of the newest history file, or None when
        the user has no history directory or it contains no timestamped files.
    """
    if username not in os.listdir(HISTORY_DIR):
        return None
    date_time_list = []
    for filename in os.listdir(os.path.join(HISTORY_DIR, username)):
        try:
            date_time_list.append(datetime.datetime.strptime(filename[:19], '%Y-%m-%d_%H-%M-%S'))
        except ValueError:
            # Skip stray files that do not start with a timestamp instead of
            # crashing the whole lookup (the original raised here).
            continue
    if not date_time_list:
        # Empty or all-unparseable directory: the original crashed on max([]).
        return None
    lasted_time = max(date_time_list)
    lasted_file = lasted_time.strftime('%Y-%m-%d_%H-%M-%S')
    return os.path.join(HISTORY_DIR, username, lasted_file)
def load_chat_history(current_model, *args):
    """Ask the current model to restore a saved conversation history,
    passing any extra arguments straight through."""
    loader = current_model.load_history
    return loader(*args)
def predict(chatbot, model, inputs, use_websearch, custom_websearch):
    """Stream the model's answer to *inputs*, relaying each partial response.

    Wires the module-level QA / condense prompt templates into the model's
    streaming inference and yields every item the model produces.
    """
    stream = model.inference(
        inputs=inputs,
        chatbot=chatbot,
        streaming=True,
        use_websearch=use_websearch,
        custom_websearch=custom_websearch,
        qa_prompt_template=qa_prompt_template,
        condense_prompt_template=condense_template,
    )
    yield from stream
def set_user_indentifier(current_model, *args):
    """Forward a user-identifier update to the active model.

    NOTE(review): "indentifier" is a typo for "identifier", but the spelling
    is shared by this function's public name and the model method, so
    renaming either would break callers.
    """
    setter = current_model.set_user_indentifier
    return setter(*args)
def retry(chatbot, model, use_websearch, custom_websearch):
    """Regenerate the model's answer to the most recent user message.

    Deletes the last exchange from the model's memory, then re-runs predict()
    with the last user input, streaming each updated chatbot state. Yields
    nothing when the chat history is empty.
    """
    model.delete_last_conversation()
    if len(chatbot) > 0:
        inputs = chatbot[-1][0]  # user half of the last (user, bot) pair
        # BUG FIX: the original did `chatbot = predict(...); yield chatbot`,
        # which yielded the generator object itself instead of the streamed
        # chatbot states — relay the stream instead.
        yield from predict(chatbot, model, inputs, use_websearch, custom_websearch)
def reset(current_model):
    """Clear the active model's conversation state and return the result."""
    result = current_model.reset_conversation()
    return result
def delete_chat_history(current_model, *args):
    """Forward a history-deletion request to the active model, passing any
    extra arguments straight through."""
    deleter = current_model.delete_history
    return deleter(*args)
def delete_first_conversation(current_model):
    """Ask the active model to drop its oldest conversation turn."""
    outcome = current_model.delete_first_conversation()
    return outcome
def delete_last_conversation(current_model, chatbot):
    """Remove the newest exchange from both the UI list and the model.

    Pops the last (user, bot) pair off *chatbot* when present (in place),
    tells the model to forget its last turn, and returns the updated list.
    """
    if chatbot:
        chatbot.pop()
    current_model.delete_last_conversation()
    return chatbot
def add_source_numbers(lst, source_name = "Source", use_source = True):
    """Prefix each item with a 1-based citation index.

    With use_source=True, items are (text, source) pairs rendered as
    '[n]\t "text"\n<source_name>: source'; otherwise items are plain
    values rendered as '[n]\t "item"'.
    """
    numbered = []
    for position, item in enumerate(lst, start=1):
        if use_source:
            numbered.append(f'[{position}]\t "{item[0]}"\n{source_name}: {item[1]}')
        else:
            numbered.append(f'[{position}]\t "{item}"')
    return numbered
def add_details(lst):
    """Wrap each text in a collapsible HTML <details> element.

    The summary shows the first 25 characters (newlines stripped) followed
    by an ellipsis; the full text goes in the <p> body.
    """
    return [
        "<details><summary>{}...</summary><p>{}</p></details>".format(
            text[:25].replace("\n", ""), text
        )
        for text in lst
    ]