weiwei1392 committed · 182d290
Parent: 5c79247
init
Files changed:
- app.py +172 -0
- llm.py +126 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,172 @@
import gradio as gr
from llm import ChatGLM, OpenAI3, OpenAI4

block_css = """
.importantButton {
    background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
    border: none !important;
}
.importantButton:hover {
    background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
    border: none !important;
}"""

webui_title = "📚📚📚📚📚📚📚📚📚📚📚📚* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ☘️ * * *智海文心* * * ☘️ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *📚📚📚📚📚📚📚📚📚📚📚📚"

# prompt templates; {generate_number} is filled in with str.format
generate_prompt = ('please generate {generate_number} difficult multiple-choice questions and attach '
                   'the answer to each question according to the paragraph below. Each question '
                   'should have four choices and only one of them is the right answer.')

correct_prompt = ('the following paragraph is a composition of a student. '
                  'please first judge the writing level of the composition, '
                  'then point out all the problems with examples, '
                  'and finally output the complete revised version.')

# map the radio-button labels to the wrapper classes in llm.py
llm_name_dict = {'chatgpt-3.5': OpenAI3, 'chatgpt-4': OpenAI4, 'chatglm—6b': ChatGLM}


def files_list_to_texts(files_list):
    # read every uploaded .txt file and return one string per file
    texts = []
    for file in files_list:
        if file.name.split('.')[-1] == 'txt':
            with open(file.name, encoding='utf-8') as f:
                texts.append(f.read())
    return texts


def function_select(mode):
    # toggle between the question-generation row and the essay-correction row
    if mode == "问题生成":  # question generation
        return gr.update(visible=True), gr.update(visible=False)
    else:
        return gr.update(visible=False), gr.update(visible=True)


def llm_change(name):
    # instantiate the wrapper class selected in the model radio
    return llm_name_dict[name]()


def text_generate(chatbot, text, generate_number, llm):
    prompt = generate_prompt.format(generate_number=generate_number) + '\n\n' + text
    answer = llm(prompt)
    return chatbot + [[text, answer]]


def files_generate(chatbot, files_list, generate_number, llm):
    try:
        texts = files_list_to_texts(files_list)
        for text in texts:
            prompt = generate_prompt.format(generate_number=generate_number) + '\n\n' + text
            answer = llm(prompt)
            chatbot = chatbot + [[text, answer]]
    except Exception:
        # "Failed to read the selected files; make sure the folder contains .txt files"
        chatbot = chatbot + [[None, "所选文件夹中的文件添加失败,请确保文件夹中含有txt类型文件"]]
    return chatbot


def text_correct(chatbot, text, llm):
    prompt = correct_prompt + '\n\n' + text
    answer = llm(prompt)
    return chatbot + [[text, answer]]


def files_correct(chatbot, files_list, llm):
    try:
        texts = files_list_to_texts(files_list)
        for text in texts:
            prompt = correct_prompt + '\n\n' + text
            answer = llm(prompt)
            chatbot = chatbot + [[text, answer]]
    except Exception:
        chatbot = chatbot + [[None, "所选文件夹中的文件添加失败,请确保文件夹中含有txt类型文件"]]
    return chatbot


def clear_screen(chatbot):
    return [[None, None]]


with gr.Blocks(css=block_css) as demo:
    gr.Markdown('\n\n\n\n')
    gr.Markdown(webui_title)
    gr.Markdown('\n\n\n\n')

    # holds the currently selected model instance; empty until a model is picked
    llm = gr.State('')
    model_mode = gr.Radio(['chatglm—6b', "chatgpt-3.5", "chatgpt-4"], label="请选择驱动模型")  # select the backing model
    model_mode.change(fn=llm_change, inputs=[model_mode], outputs=[llm])

    fun_mode = gr.Radio(["问题生成", "作文批改"], label="请选择功能模式")  # question generation / essay correction
    qg = gr.Row(visible=False)
    aa = gr.Row(visible=False)
    fun_mode.change(fn=function_select, inputs=[fun_mode], outputs=[qg, aa])

    with qg:
        with gr.Column(scale=10):
            chatbot = gr.Chatbot([[None, None]],
                                 elem_id="chat-box",
                                 show_label=False).style(height=800)
            clear_button = gr.Button(value="清屏")  # clear screen
            clear_button.click(fn=clear_screen, inputs=[chatbot], outputs=[chatbot])

        with gr.Column(scale=10):
            with gr.Tab('生成配置'):  # generation settings
                generate_number = gr.Slider(1,
                                            5,
                                            value=3,
                                            step=1,
                                            label="请设定单篇文章需要生成的问题数量",  # questions per article
                                            interactive=True)

                gr.Markdown('单篇生成')  # single-text generation
                texts = gr.Textbox(show_label=False, placeholder="文本内容", lines=12).style(container=False)
                text_button = gr.Button(value="生成问题")  # generate questions
                text_button.click(fn=text_generate, inputs=[chatbot, texts, generate_number, llm], outputs=[chatbot])

                gr.Markdown('批量生成')  # batch generation
                folder_address = gr.File(label="添加文件",  # add files
                                         file_types=['.txt', '.md', '.docx', '.pdf'],
                                         file_count="multiple",
                                         show_label=False)
                file_button = gr.Button(value="生成问题")
                file_button.click(fn=files_generate, inputs=[chatbot, folder_address, generate_number, llm],
                                  outputs=[chatbot])

    with aa:
        with gr.Column(scale=10):
            chatbot = gr.Chatbot([[None, None]],
                                 elem_id="chat-box",
                                 show_label=False).style(height=800)
            clear_button = gr.Button(value="清屏")
            clear_button.click(fn=clear_screen, inputs=[chatbot], outputs=[chatbot])

        with gr.Column(scale=10):
            with gr.Tab('批改配置'):  # correction settings
                gr.Markdown('单篇批改')  # single-essay correction
                texts = gr.Textbox(show_label=False, placeholder="文本内容", lines=16).style(container=False)
                text_button = gr.Button(value="批改")  # correct
                text_button.click(fn=text_correct, inputs=[chatbot, texts, llm], outputs=[chatbot])

                gr.Markdown('批量批改')  # batch correction
                folder_address = gr.File(label="添加文件",
                                         file_types=['.txt', '.md', '.docx', '.pdf'],
                                         file_count="multiple",
                                         show_label=False)
                file_button = gr.Button(value="批改")
                file_button.click(fn=files_correct, inputs=[chatbot, folder_address, llm], outputs=[chatbot])

demo.queue(concurrency_count=5).launch(show_api=False)
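For reference, a minimal sketch of exercising the question-generation path without the Gradio UI, assuming the llm.py wrappers below are importable and the api2d endpoint is reachable; the sample paragraph and question count are hypothetical placeholders, not part of the commit:

# usage sketch: drive question generation headlessly
from llm import OpenAI3

llm = OpenAI3()
sample_text = "The water cycle moves water through evaporation, condensation and precipitation."  # placeholder
prompt = ('please generate 3 difficult multiple-choice questions and attach the answer to each '
          'question according to the paragraph below. Each question should have four choices and '
          'only one of them is the right answer.\n\n' + sample_text)
print(llm(prompt))  # LLM.__call__ forwards the prompt to OpenAI3._call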
llm.py
ADDED
@@ -0,0 +1,126 @@
from typing import List, Optional

import requests
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens

# api2d forward key, used as the bearer token for the OpenAI-compatible endpoint
FORWARD_KEY = 'fk198719-Pmvv22OqZiovaxRq6YxCzkTcd6UVVX5O'


class ChatGLM(LLM):
    """Wrapper around a self-hosted ChatGLM-6B HTTP endpoint."""
    max_length: int = 10000
    temperature: float = 0
    top_p: float = 0.9
    tokenizer: object = None
    model: object = None
    history_len: int = 10
    history: List = []
    URL: str = 'http://183.131.3.48:9200'
    HEADERS: dict = {'Content-Type': 'application/json'}

    @property
    def _llm_type(self) -> str:
        return "ChatGLM"

    def _call(self,
              prompt: str,
              history: Optional[List[List[str]]] = None,
              stop: Optional[List[str]] = None) -> str:
        if history:
            history = [i for i in history if i[0] is not None]  # clear out the system message
            history = history[-self.history_len:]

        # field names ('tokenizers', etc.) follow the remote server's API
        params = {'tokenizers': self.tokenizer, 'prompt': prompt, 'history': history,
                  'top_p': self.top_p, 'max_length': self.max_length,
                  'temperature': self.temperature}
        response = requests.post(self.URL, headers=self.HEADERS, json=params).json()
        answer = response['response']
        if stop is not None:
            answer = enforce_stop_tokens(answer, stop)
        return answer


class OpenAI3(LLM):
    """Wrapper around the api2d OpenAI-compatible chat completions endpoint (gpt-3.5-turbo)."""
    max_length: int = 10000
    temperature: float = 0.2
    top_p: float = 0.9
    tokenizer: object = None
    model: object = None
    history_len: int = 10
    history: List = []
    HEADERS: dict = {'Content-Type': 'application/json',
                     'Authorization': f'Bearer {FORWARD_KEY}'}
    URL: str = 'https://openai.api2d.net/v1/chat/completions'
    MODEL_NAME: str = "gpt-3.5-turbo"

    @property
    def _llm_type(self) -> str:
        return "OpenAI3"

    def _call(self,
              prompt: str,
              history: Optional[List[List[str]]] = None,
              stop: Optional[List[str]] = None) -> str:
        if history:
            history = [i for i in history if i[0] is not None]
            history = history[-self.history_len:]
            # flatten [user, assistant] pairs into an OpenAI-style message list
            message = [[{"role": "user", "content": i[0]},
                        {"role": "assistant", "content": i[1]}] for i in history]
            message = sum(message, [])
        else:
            message = []
        message.append({"role": "user", "content": prompt})
        params = {"model": self.MODEL_NAME, "messages": message, 'temperature': self.temperature}
        response = requests.post(self.URL, headers=self.HEADERS, json=params).json()
        answer = response['choices'][0]['message']['content']
        if stop is not None:
            answer = enforce_stop_tokens(answer, stop)
        return answer


class OpenAI4(OpenAI3):
    """Same endpoint and request format as OpenAI3, but backed by gpt-4."""
    MODEL_NAME: str = "gpt-4"

    @property
    def _llm_type(self) -> str:
        return "OpenAI4"
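One caveat worth noting: in this langchain version, LLM.__call__ forwards only the prompt and stop tokens to _call, so the optional chat history is honored only when _call is invoked directly. A minimal sketch, with hypothetical history pairs and assuming the ChatGLM endpoint above is reachable:

# usage sketch: pass history by calling _call directly
from llm import ChatGLM

glm = ChatGLM()
answer = glm._call(
    "Answer in one sentence: what drives this cycle?",
    history=[["What is the water cycle?",
              "It is the movement of water through evaporation, condensation and precipitation."]],
)
print(answer)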
requirements.txt
ADDED
@@ -0,0 +1,3 @@
gradio==3.28.3
langchain==0.0.161
requests==2.30.0
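These exact pins matter: app.py relies on Gradio 3.x APIs (.style(...), queue(concurrency_count=...)) that were removed in Gradio 4, and llm.py uses the pre-0.1 langchain.llms.base import path, so upgrading either package would require code changes.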