Update app.py
app.py CHANGED
```diff
@@ -4,8 +4,6 @@ import gradio as gr
 from zhipuai import ZhipuAI
 import json
 
-api_key = os.environ['ZHIPUAI_API_KEY']
-
 def convert_to_openai_format(nested_chat):
     openai_format = []
     for dialogue in nested_chat:
```
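The removed module-level lookup raises `KeyError` at import time whenever `ZHIPUAI_API_KEY` is unset (and depends on an `os` import outside this hunk); the commit threads the key through as a function argument instead. A minimal sketch of a middle-ground pattern, where the helper name `get_api_key` is hypothetical and not part of the commit:

```python
import os

def get_api_key(user_supplied=None):
    # Hypothetical helper: prefer a key supplied through the UI, and fall
    # back to the environment instead of failing at import time.
    key = user_supplied or os.environ.get("ZHIPUAI_API_KEY", "")
    if not key:
        raise ValueError("No ZhipuAI API key provided")
    return key
```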
```diff
@@ -14,7 +12,7 @@ def convert_to_openai_format(nested_chat):
         openai_format.extend([user_dialogue, assistant_dialogue])
     return openai_format
 
-def master_llm(user_prompt, history):
+def master_llm(user_prompt, history, api_key):
     # Generate the system prompt for the expert LLM
     # Example: generate a simple system prompt from the user's question
     if history != []:
```
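The diff shows only the head and tail of `convert_to_openai_format`; the loop body is elided. Assuming `history` arrives in Gradio's tuple-style format (a list of `[user_message, assistant_reply]` pairs), a hedged reconstruction of the full function:

```python
def convert_to_openai_format(nested_chat):
    # Assumed input shape: [["hi", "hello"], ...] (Gradio tuple-style history).
    openai_format = []
    for dialogue in nested_chat:
        user_dialogue = {"role": "user", "content": dialogue[0]}
        assistant_dialogue = {"role": "assistant", "content": dialogue[1]}
        openai_format.extend([user_dialogue, assistant_dialogue])
    return openai_format
```

For example, `convert_to_openai_format([["hi", "hello"]])` would yield `[{"role": "user", "content": "hi"}, {"role": "assistant", "content": "hello"}]`.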
````diff
@@ -36,13 +34,14 @@ def master_llm(user_prompt, history):
 
 Parameter explanation:
 temperature is the degree of randomness in the AI's reply; the smaller the value, the more divergent the reply logic. The range is (0,1), but it cannot equal 0 or 1.
-top_p is the AI
-
+top_p is the sampling range of candidate replies the AI will consider; for example, 0.1 means only the top 10% best candidate replies are selected. The range is (0,1), but it cannot equal 0 or 1.
+Tip: in general, if a creative AI is needed, set these two parameters a bit higher;
+if a strictly obedient AI is needed, keep temperature and top_p as low as possible;
 
-
+Note: do not force an expert into existence. If you cannot tell what domain expert is needed (e.g. there is no context, or the user is asking at random), reply directly with this default setting:
 ```
 {{
-"expert_system_prompt":"
+"expert_system_prompt":"Respond to the user appropriately based on the message they sent (following the context, if there is any).",
 "temperature":"0.5",
 "top_p":"0.5"
 }}
````
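The doubled braces (`{{`, `}}`) indicate the prompt is built with an f-string or `str.format`, so the literal JSON braces are escaped. The hunk does not show how `master_llm` turns the model's fenced JSON reply back into `expert_system_prompt`, `temperature`, and `top_p`; a hedged sketch of one way to do it, where the function name `parse_master_reply` and the clamping bounds are assumptions:

```python
import json
import re

def parse_master_reply(reply):
    # Strip optional ``` / ```json fences (assumption: the master LLM
    # follows the prompt and returns a fenced JSON object).
    body = re.sub(r"```(?:json)?", "", reply).strip()
    data = json.loads(body)
    # The prompt requests string-typed values in the open interval (0, 1);
    # clamp defensively, since LLM output is not guaranteed to comply.
    temperature = min(max(float(data["temperature"]), 0.01), 0.99)
    top_p = min(max(float(data["top_p"]), 0.01), 0.99)
    return data["expert_system_prompt"], temperature, top_p
```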
```diff
@@ -71,7 +70,7 @@ top_p is the candidate sampling range the AI will consider; for example, 0.1 means only the top 10% recommended
 
     return expert_system_prompt, temperature, top_p
 
-def expert_llm(user_prompt, history, expert_system_prompt, temperature, top_p):
+def expert_llm(user_prompt, history, expert_system_prompt, temperature, top_p, api_key):
     client = ZhipuAI(api_key=api_key)
     if history != []:
         prompt_records = convert_to_openai_format(history)
```
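`expert_llm` now receives the key from its caller instead of the old module global. The middle of the function is elided by the diff; a hedged sketch of what the visible pieces imply, with the model name `glm-4` being an assumption rather than something shown in the hunk:

```python
from zhipuai import ZhipuAI

def expert_llm(user_prompt, history, expert_system_prompt, temperature, top_p, api_key):
    client = ZhipuAI(api_key=api_key)
    messages = [{"role": "system", "content": expert_system_prompt}]
    if history != []:
        prompt_records = convert_to_openai_format(history)  # replay prior turns
        messages += prompt_records
    messages.append({"role": "user", "content": user_prompt})
    response = client.chat.completions.create(
        model="glm-4",  # assumed; the actual model name is hidden by the diff
        messages=messages,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message.content
```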
```diff
@@ -87,12 +86,14 @@ def expert_llm(user_prompt, history, expert_system_prompt, temperature, top_p):
     )
     return response.choices[0].message.content
 
-def gradio_fn(message, history):
-    expert_system_prompt, temperature, top_p = master_llm(message, history)
-    expert_response = expert_llm(message, history, expert_system_prompt, temperature, top_p)
+def gradio_fn(message, history, api_key):
+    expert_system_prompt, temperature, top_p = master_llm(message, history, api_key)
+    expert_response = expert_llm(message, history, expert_system_prompt, temperature, top_p, api_key)
     return expert_response
 
-
+with gr.Blocks() as demo:
+    api_key = gr.Textbox()
+    main_interface = gr.ChatInterface(fn=gradio_fn, additional_inputs=api_key)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(show_error=True)
```
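The new `gr.Blocks` wiring is what lets the key come from the page rather than the environment: `gr.ChatInterface` appends the current value of each component in `additional_inputs` to the arguments of `fn`, so the app calls `gradio_fn(message, history, api_key)`. `show_error=True` surfaces exceptions (e.g. an invalid key) in the browser instead of failing silently. A small variant sketch, where the `label` and `type="password"` arguments are suggestions rather than part of the commit:

```python
import gradio as gr

with gr.Blocks() as demo:
    # type="password" masks the key in the browser; the label text is assumed.
    api_key = gr.Textbox(label="ZhipuAI API key", type="password")
    main_interface = gr.ChatInterface(fn=gradio_fn, additional_inputs=api_key)

if __name__ == "__main__":
    demo.launch(show_error=True)
```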