File size: 3,381 Bytes
d037cdf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import datetime
import os

from prompts.llm import qa_prompt_template
from prompts.condense_llm import condense_template
from typing import Dict, Any
from config import HISTORY_DIR

def get_messages_last_content(data: Dict[str, Any], **_: Any) -> str:
    """Extract the latest human utterance from an LLM request payload.

    Reads the last entry of ``data["messages"]`` — an object exposing a
    ``.content`` attribute (NOTE: not a plain dict) — and strips the
    ``Human:`` / ``Assistant:`` transcript markers around the final turn.

    :param data: the user llm request data; must contain a non-empty
        ``"messages"`` sequence of message objects
    :type data: Dict[str, Any]
    :return: the last human message, stripped of surrounding whitespace
    :rtype: str
    :raises KeyError: if ``"messages"`` is missing
    :raises IndexError: if ``"messages"`` is empty
    """
    last_content = data["messages"][-1].content
    # Keep only the text after the final "Human:" marker and before the
    # first subsequent "Assistant:" marker; tolerant of absent markers
    # (split on a missing separator is a no-op).
    return last_content.split("Human:")[-1].split("Assistant:")[0].strip()


def transcribe(current_model, audio):
    """Delegate audio transcription to the active model's audio endpoint."""
    response = current_model.audio_response(audio)
    return response


def history_file_path(username):
    """Build a fresh, timestamped JSON history-file path for *username*.

    Creates the per-user directory under HISTORY_DIR if needed and returns
    a path of the form ``HISTORY_DIR/<username>/<YYYY-MM-DD_HH-MM-SS>.json``.
    """
    user_dir = os.path.join(HISTORY_DIR, username)
    os.makedirs(user_dir, exist_ok=True)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    return os.path.join(user_dir, f"{timestamp}.json")

def load_lasted_file_username(username):
    """Return the path of the user's most recent history file, or None.

    Scans ``HISTORY_DIR/<username>`` for files named with a leading
    ``YYYY-MM-DD_HH-MM-SS`` timestamp (the first 19 characters) and picks
    the newest one.

    NOTE(review): ``history_file_path`` saves files as "<timestamp>.json",
    but the path returned here omits the ".json" suffix — presumably the
    loader appends it; TODO confirm against the caller.

    :param username: the user whose history directory is inspected
    :return: full path (without extension) of the latest file, or None if
        the user has no history directory or it is empty
    """
    user_dir = os.path.join(HISTORY_DIR, username)
    if not os.path.isdir(user_dir):
        return None
    filenames = os.listdir(user_dir)
    if not filenames:
        # Original code crashed here: max() on an empty sequence raises
        # ValueError when the directory exists but holds no files.
        return None
    date_time_list = [
        datetime.datetime.strptime(filename[:19], '%Y-%m-%d_%H-%M-%S')
        for filename in filenames
    ]
    lasted_time = max(date_time_list)
    lasted_file = lasted_time.strftime('%Y-%m-%d_%H-%M-%S')
    return os.path.join(HISTORY_DIR, username, lasted_file)


def load_chat_history(current_model, *args):
    """Forward a history-load request to the active model, passing *args through."""
    result = current_model.load_history(*args)
    return result


def predict(chatbot, model, inputs, use_websearch, custom_websearch):
    """Stream inference responses from *model* for the given *inputs*.

    Runs the model in streaming mode with the module's QA and condense
    prompt templates and yields each partial response as it arrives.

    :param chatbot: current chat transcript passed through to the model
    :param model: model object exposing an ``inference`` generator
    :param inputs: the user's new message
    :param use_websearch: enable the built-in web search path
    :param custom_websearch: enable the custom web search path
    """
    # Original bound the generator to a local named `iter`, shadowing the
    # builtin; renamed and delegated with `yield from`.
    responses = model.inference(
        inputs=inputs,
        chatbot=chatbot,
        streaming=True,
        use_websearch=use_websearch,
        custom_websearch=custom_websearch,
        qa_prompt_template=qa_prompt_template,
        condense_prompt_template=condense_template,
    )
    yield from responses


def set_user_indentifier(current_model, *args):
    """Forward the user-identifier update to the active model.

    NOTE: the "indentifier" spelling is preserved deliberately — both this
    public name and the model hook it calls use it.
    """
    result = current_model.set_user_indentifier(*args)
    return result


def retry(chatbot, model, use_websearch, custom_websearch):
    """Re-run the model on the last user message after dropping its reply.

    Deletes the last conversation turn from the model, then re-submits the
    most recent user input through :func:`predict`.

    :param chatbot: chat transcript as a list of ``[user, assistant]`` pairs
    :param model: model object exposing ``delete_last_conversation``
    :param use_websearch: forwarded to :func:`predict`
    :param custom_websearch: forwarded to :func:`predict`
    """
    model.delete_last_conversation()
    if not chatbot:
        # Original crashed here with an empty transcript: `inputs` was only
        # bound inside the `if` but used unconditionally (NameError).
        yield chatbot
        return
    inputs = chatbot[-1][0]
    # NOTE(review): this yields the predict() generator object itself, not
    # the streamed responses — matches the original behavior; the caller
    # presumably consumes it. TODO confirm whether `yield from` was intended.
    chatbot = predict(chatbot, model, inputs, use_websearch, custom_websearch)
    yield chatbot


def reset(current_model):
    """Ask the active model to clear its conversation state."""
    outcome = current_model.reset_conversation()
    return outcome


def delete_chat_history(current_model, *args):
    """Forward a history-deletion request to the active model, passing *args through."""
    outcome = current_model.delete_history(*args)
    return outcome


def delete_first_conversation(current_model):
    """Ask the active model to drop its oldest conversation turn."""
    outcome = current_model.delete_first_conversation()
    return outcome


def delete_last_conversation(current_model, chatbot):
    """Remove the newest exchange from both the UI transcript and the model.

    Pops the last entry of *chatbot* (if any), tells the model to forget its
    last turn, and returns the updated transcript.
    """
    if chatbot:
        chatbot.pop()
    current_model.delete_last_conversation()
    return chatbot


def add_source_numbers(lst, source_name="Source", use_source=True):
    """Number each entry for display, optionally appending its source.

    With ``use_source`` True, each item is a ``(text, source)`` pair and the
    source is printed on a second line; otherwise each item is plain text.
    Returns a new list of formatted strings.
    """
    numbered = []
    for position, item in enumerate(lst, start=1):
        if use_source:
            numbered.append(f'[{position}]\t "{item[0]}"\n{source_name}: {item[1]}')
        else:
            numbered.append(f'[{position}]\t "{item}"')
    return numbered

def add_details(lst):
    """Wrap each text in a collapsible HTML ``<details>`` element.

    The summary is the first 25 characters of the text with newlines removed,
    followed by "..."; the full text goes in the body. Returns a new list.
    """
    wrapped = []
    for text in lst:
        summary = text[:25].replace("\n", "")
        wrapped.append(
            "<details><summary>" + summary + "...</summary><p>" + text + "</p></details>"
        )
    return wrapped