youngtsai committed
Commit 1b696e5 · 1 Parent(s): ac10d4c

generate_points

Files changed (1)
  1. app.py +125 -90
app.py CHANGED
@@ -1,4 +1,3 @@
1
-
2
  import gradio as gr
3
  from openai import OpenAI
4
 
@@ -26,6 +25,7 @@ from assignment_ui import create_assignment_ui
26
  from assignment_service import AssignmentService
27
  from submission_service import SubmissionService
28
  from dashboard_service import DashboardService
 
29
 
30
  is_env_local = os.getenv("IS_ENV_LOCAL", "false") == "true"
31
  print(f"is_env_local: {is_env_local}")
@@ -72,7 +72,6 @@ vertexai.init(
72
  credentials=google_creds,
73
  )
74
 
75
-
76
  _AssignmentService = AssignmentService(GCS_SERVICE)
77
  _SubmissionService = SubmissionService(GCS_SERVICE)
78
  _DashboardService = DashboardService(_AssignmentService, _SubmissionService)
@@ -158,98 +157,129 @@ def get_exam_history():
158
 
159
  def generate_topics(model, max_tokens, sys_content, scenario, eng_level, user_generate_topics_prompt):
160
  """
161
- 根据系统提示和用户输入的情境及主题,调用OpenAI API生成相关的主题句。
162
- """
163
-
164
- exam_history = get_exam_history()
165
- exam_history_prompt = f"""
166
- Please refer to a topic scenario from the following exam history:
167
- {exam_history}
168
- Based on the English level, give a similar topic scenario. But don't use the same topic scenario.
169
- """
170
-
171
- user_content = f"""
172
- english level is: {eng_level}
173
- ---
174
- exam_history_prompt: {exam_history_prompt}
175
- ---
176
- {user_generate_topics_prompt}
177
  """
178
- messages = [
179
- {"role": "system", "content": sys_content},
180
- {"role": "user", "content": user_content}
181
- ]
 
 
 
182
 
183
- request_payload = {
184
- "model": model,
185
- "messages": messages,
186
- "max_tokens": max_tokens,
187
- "response_format": { "type": "json_object" }
188
- }
189
 
190
- try:
191
- # response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
192
- # content = response.choices[0].message.content
193
- # topics = json.loads(content)["topics"]
194
- # print(f"====generate_topics====")
195
- # print(topics)
196
- # gr_update = gr.update(choices=topics, visible=True)
197
-
198
- model_name = "gemini-1.5-pro"
199
- gemini_model = GenerativeModel(model_name=model_name)
200
- model_response = gemini_model.generate_content(
201
- f"{sys_content}, {user_content}"
 
 
 
 
202
  )
203
- content = model_response.candidates[0].content.parts[0].text
204
- print(f"====generate_topics====")
205
- print(content)
206
- print("=====")
207
- if "```json" in content:
208
  content = content.replace("```json", "").replace("```", "")
209
- topics = json.loads(content)["topics"]
210
- gr_update = gr.update(choices=topics, visible=True)
211
- except Exception as e:
212
- print(f"An error occurred while generating topics: {e}")
213
- raise gr.Error("網路塞車,請重新嘗試一次!")
 
 
 
 
214
 
215
- return gr_update
216
 
217
  def update_topic_input(topic):
218
  return topic
219
 
220
  def generate_points(model, max_tokens, sys_content, scenario, eng_level, topic, user_generate_points_prompt):
221
  """
222
- 根据系统提示和用户输入的情境、主题,调用OpenAI API生成相关的主题句。
223
- """
224
- user_content = f"""
225
- scenario is: {scenario}
226
- english level is: {eng_level}
227
- topic is: {topic}
228
- ---
229
- {user_generate_points_prompt}
230
  """
231
- messages = [
232
- {"role": "system", "content": sys_content},
233
- {"role": "user", "content": user_content}
234
- ]
235
 
236
- request_payload = {
237
- "model": model,
238
- "messages": messages,
239
- "max_tokens": max_tokens,
240
- "response_format": { "type": "json_object" }
241
- }
242
 
243
- try:
244
- response = OPEN_AI_CLIENT.chat.completions.create(**request_payload)
245
- content = response.choices[0].message.content
246
- points = json.loads(content)["points"]
247
- gr_update = gr.update(choices=points, visible=True)
248
- except Exception as e:
249
- print(f"An error occurred while generating points: {e}")
250
- raise gr.Error("網路塞車,請重新嘗試一次!")
 
 
 
 
251
 
252
- return gr_update
253
 
254
  def update_points_input(points):
255
  return points
@@ -1401,7 +1431,7 @@ def get_chinese_paragraph_1st_evaluate_content(
1401
 
1402
  # Output format:
1403
  1. 先給 綜合回饋、評分標準與回饋、修改範例
1404
- 2. 再將評分標準與回饋的內容以JSON格式輸出,並且請使用繁體中文(ZH-TW)來評分段落並輸出,用 ```json ..... ``` 包裹:
1405
  3. please use Chinese language (ZH-TW) to evaluate the paragraph and output use JSON format
1406
  4. json 內容只需要給「主題與內容」、「遣詞造句」的評分標準與回饋
1407
 
@@ -2098,8 +2128,8 @@ with gr.Blocks(theme=THEME, css=CSS) as demo:
2098
  gr.Image(value="https://storage.googleapis.com/jutor/jutor_en_chinese.jpg", show_label=False, show_download_button=False)
2099
 
2100
  # ===== 基礎級使用者 =====
2101
- with gr.Row(visible=False) as default_params:
2102
- model = gr.Radio(["gpt-4o", "gpt-4-turbo"], label="Model", value="gpt-4o")
2103
  max_tokens = gr.Slider(minimum=50, maximum=4000, value=4000, label="Max Tokens")
2104
  sys_content_input = gr.Textbox(label="System Prompt", value="You are an English teacher who is practicing with me to improve my English writing skill.")
2105
  with gr.Row():
@@ -2168,6 +2198,10 @@ with gr.Blocks(theme=THEME, css=CSS) as demo:
2168
Give me 10 random topics,
2169
for a paragraph. Just the topics, no explanation, use English language based on eng_level.
2170
  Make sure the vocabulary you use is at eng_level.
 
 
 
 
2171
  output use JSON
2172
  EXAMPLE:
2173
  "topics":["topic1", "topic2", "topic3", "topic4", "topic5", "topic6", "topic7", "topic8", "topic9", "topic10"]
@@ -2215,10 +2249,11 @@ with gr.Blocks(theme=THEME, css=CSS) as demo:
2215
  Please provide main points to develop in a paragraph about topic in the context of scenario,
2216
  use simple English language and make sure the vocabulary you use is at eng_level.
2217
No more explanation, and do not develop these points into a simple paragraph.
 
2218
  Output use JSON format
2219
  EXAMPLE:
2220
  {{
2221
- "points":["point1", "point2", "point3"]
2222
  }}
2223
  """
2224
  user_generate_points_prompt = gr.Textbox(label="Points Prompt", value=default_generate_points_prompt, visible=False)
@@ -2690,11 +2725,11 @@ with gr.Blocks(theme=THEME, css=CSS) as demo:
2690
 
2691
  Final Output JSON Format:
2692
  {{
2693
- content“: {{contents dict}},
2694
- organization“: {{organization'dict}},
2695
- grammar_and_usage“: {{grammar_and_usage'dict}},
2696
- vocabulary“: {{vocabulary'dict}},
2697
- coherence_and_cohesion“: {{coherence_and_cohesion'dict}}
2698
  }}
2699
  """
2700
  user_generate_paragraph_evaluate_prompt = gr.Textbox(label="Paragraph evaluate Prompt", value=default_user_generate_paragraph_evaluate_prompt, visible=False)
@@ -3029,7 +3064,7 @@ with gr.Blocks(theme=THEME, css=CSS) as demo:
3029
  with gr.Column():
3030
  generate_full_paragraph_refine_button = gr.Button("✨ JUTOR 段落改善建議", variant="primary")
3031
  full_paragraph_refine_output_table = gr.DataFrame(label="段落改善建議", wrap=True, interactive=False)
3032
- full_paragraph_refine_output = gr.HTML(label="修改建議", visible=False)
3033
  gr.Markdown("## 修改結果")
3034
  full_paragraph_refine_output_diff = gr.HTML()
3035
 
@@ -3574,7 +3609,7 @@ with gr.Blocks(theme=THEME, css=CSS) as demo:
3574
  chinese_full_paragraph_refine_input = gr.TextArea(label="這是你的原始寫作內容,參考建議,你可以選擇是否修改:", show_copy_button=True)
3575
  with gr.Column():
3576
  with gr.Row():
3577
- generate_chinese_full_paragraph_refine_button = gr.Button("段落全文分析", variant="primary")
3578
  with gr.Row():
3579
  chinese_full_paragraph_refine_output_text = gr.Markdown(label="段落全文分析")
3580
  with gr.Row():
@@ -3741,7 +3776,7 @@ with gr.Blocks(theme=THEME, css=CSS) as demo:
3741
  gr.Markdown("<span style='color:#4e80ee'>作業繳交截止時間</span>")
3742
  chinese_assignment_submission_deadline_history_log = gr.Markdown()
3743
  gr.Markdown("---")
3744
- gr.Markdown("# 回傳作業內容")
3745
 
3746
  with gr.Row():
3747
  with gr.Column():
 
 
1
  import gradio as gr
2
  from openai import OpenAI
3
 
 
25
  from assignment_service import AssignmentService
26
  from submission_service import SubmissionService
27
  from dashboard_service import DashboardService
28
+ from llms import GeminiProvider, OpenAIProvider, LLMService
29
 
30
  is_env_local = os.getenv("IS_ENV_LOCAL", "false") == "true"
31
  print(f"is_env_local: {is_env_local}")
 
72
  credentials=google_creds,
73
  )
74
 
 
75
  _AssignmentService = AssignmentService(GCS_SERVICE)
76
  _SubmissionService = SubmissionService(GCS_SERVICE)
77
  _DashboardService = DashboardService(_AssignmentService, _SubmissionService)
 
157
 
158
  def generate_topics(model, max_tokens, sys_content, scenario, eng_level, user_generate_topics_prompt):
159
  """
160
+ 根据系统提示和用户输入的情境及主题,调用 LLM API 生成相关的主题句。
161
  """
162
+ try:
163
+ exam_history = get_exam_history()
164
+ exam_history_prompt = f"""
165
+ Please refer to a topic scenario from the following exam history:
166
+ {exam_history}
167
+ Based on the English level, give a similar topic scenario. But don't use the same topic scenario.
168
+ """
169
 
170
+ user_content = f"""
171
+ english level is: {eng_level}
172
+ ---
173
+ exam_history_prompt: {exam_history_prompt}
174
+ ---
175
+ {user_generate_topics_prompt}
176
+ """
177
+
178
+ messages = [
179
+ {"role": "system", "content": sys_content},
180
+ {"role": "user", "content": user_content}
181
+ ]
182
 
183
+ # 根據模型選擇 provider
184
+ if "gemini" in model.lower():
185
+ print("====gemini====")
186
+ provider = GeminiProvider()
187
+ else:
188
+ print("====openai====")
189
+ provider = OpenAIProvider(OPEN_AI_CLIENT)
190
+
191
+ # 使用 LLMService 處理請求
192
+ llm_service = LLMService(provider)
193
+ content = llm_service.chat(
194
+ prompt=f"{sys_content}\n{user_content}" if "gemini" in model.lower() else None,
195
+ messages=messages,
196
+ model=model,
197
+ max_tokens=max_tokens,
198
+ response_format={"type": "json_object"}
199
  )
200
+
201
+ # 處理回應格式
202
+ if isinstance(content, str) and "```json" in content:
 
 
203
  content = content.replace("```json", "").replace("```", "")
204
+
205
+ try:
206
+ topics = json.loads(content)["topics"]
207
+ if not topics or not isinstance(topics, list):
208
+ raise ValueError("Invalid topics format")
209
+ return gr.update(choices=topics, visible=True)
210
+ except (json.JSONDecodeError, KeyError, ValueError) as e:
211
+ print(f"Error parsing topics: {e}")
212
+ raise gr.Error("無法解析主題,請重新嘗試")
213
 
214
+ except Exception as e:
215
+ print(f"An error occurred while generating topics: {str(e)}")
216
+ error_msg = "網路塞車,請重新嘗試一次!"
217
+ if "rate limit" in str(e).lower():
218
+ error_msg = "請求過於頻繁,請稍後再試"
219
+ elif "invalid_request_error" in str(e).lower():
220
+ error_msg = "請求格式錯誤,請檢查輸入"
221
+ raise gr.Error(error_msg)
222
 
223
  def update_topic_input(topic):
224
  return topic
225
 
226
  def generate_points(model, max_tokens, sys_content, scenario, eng_level, topic, user_generate_points_prompt):
227
  """
228
+ 根据系统提示和用户输入的情境、主题,调用 LLM API 生成相关的要点。
229
  """
230
+ try:
231
+ user_content = f"""
232
+ scenario is: {scenario}
233
+ english level is: {eng_level}
234
+ topic is: {topic}
235
+ ---
236
+ {user_generate_points_prompt}
237
+ """
238
+
239
+ messages = [
240
+ {"role": "system", "content": sys_content},
241
+ {"role": "user", "content": user_content}
242
+ ]
243
 
244
+ # 根據模型選擇 provider
245
+ if "gemini" in model.lower():
246
+ print("====gemini====")
247
+ provider = GeminiProvider()
248
+ else:
249
+ print("====openai====")
250
+ provider = OpenAIProvider(OPEN_AI_CLIENT)
251
+
252
+ # 使用 LLMService 處理請求
253
+ llm_service = LLMService(provider)
254
+ content = llm_service.chat(
255
+ prompt=f"{sys_content}\n{user_content}" if "gemini" in model.lower() else None,
256
+ messages=messages,
257
+ model=model,
258
+ max_tokens=max_tokens,
259
+ response_format={"type": "json_object"}
260
+ )
261
 
262
+ # 處理回應格式
263
+ if isinstance(content, str) and "```json" in content:
264
+ content = content.replace("```json", "").replace("```", "")
265
+
266
+ try:
267
+ points = json.loads(content)["points"]
268
+ if not points or not isinstance(points, list):
269
+ raise ValueError("Invalid points format")
270
+ return gr.update(choices=points, visible=True)
271
+ except (json.JSONDecodeError, KeyError, ValueError) as e:
272
+ print(f"Error parsing points: {e}")
273
+ raise gr.Error("無法解析要點,請重新嘗試")
274
 
275
+ except Exception as e:
276
+ print(f"An error occurred while generating points: {str(e)}")
277
+ error_msg = "網路塞車,請重新嘗試一次!"
278
+ if "rate limit" in str(e).lower():
279
+ error_msg = "請求過於頻繁,請稍後再試"
280
+ elif "invalid_request_error" in str(e).lower():
281
+ error_msg = "請求格式錯誤,請檢查輸入"
282
+ raise gr.Error(error_msg)
283
 
284
  def update_points_input(points):
285
  return points
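generate_topics and generate_points above now duplicate the same provider selection, fence stripping, list validation, and error mapping. A possible follow-up consolidation, sketched here only as a suggestion (it is not part of this commit; GeminiProvider, OpenAIProvider, LLMService and OPEN_AI_CLIENT are the module-level names already used in app.py):

```python
import json

import gradio as gr


def call_llm_for_list(model, max_tokens, sys_content, user_content, json_key):
    # Same provider selection as generate_topics / generate_points
    if "gemini" in model.lower():
        provider = GeminiProvider()
    else:
        provider = OpenAIProvider(OPEN_AI_CLIENT)

    llm_service = LLMService(provider)
    content = llm_service.chat(
        prompt=f"{sys_content}\n{user_content}" if "gemini" in model.lower() else None,
        messages=[
            {"role": "system", "content": sys_content},
            {"role": "user", "content": user_content},
        ],
        model=model,
        max_tokens=max_tokens,
        response_format={"type": "json_object"},
    )

    # Strip a ```json fence if the model wrapped its output
    if isinstance(content, str) and "```json" in content:
        content = content.replace("```json", "").replace("```", "")

    items = json.loads(content)[json_key]
    if not items or not isinstance(items, list):
        raise ValueError(f"Invalid {json_key} format")
    return gr.update(choices=items, visible=True)


# generate_topics would then reduce to roughly:
#   return call_llm_for_list(model, max_tokens, sys_content, user_content, "topics")
```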
 
1431
 
1432
  # Output format:
1433
  1. 先給 綜合回饋、評分標準與回饋、修改範例
1434
+ 2. 再將評分標準與回饋的內容以JSON格式輸出,並且請使用繁體中文(ZH-TW)來評分段落並輸出,用 ```json ..... ``` 包裹:
1435
  3. please use Chinese language (ZH-TW) to evaluate the paragraph and output use JSON format
1436
  4. json 內容只需要給「主題與內容」、「遣詞造句」的評分標準與回饋
1437
 
 
2128
  gr.Image(value="https://storage.googleapis.com/jutor/jutor_en_chinese.jpg", show_label=False, show_download_button=False)
2129
 
2130
  # ===== 基礎級使用者 =====
2131
+ with gr.Row(visible=True) as default_params:
2132
+ model = gr.Radio(["gpt-4o", "gpt-4-turbo", "gemini-1.5-pro"], label="Model", value="gemini-1.5-pro")
2133
  max_tokens = gr.Slider(minimum=50, maximum=4000, value=4000, label="Max Tokens")
2134
  sys_content_input = gr.Textbox(label="System Prompt", value="You are an English teacher who is practicing with me to improve my English writing skill.")
2135
  with gr.Row():
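The new gemini-1.5-pro default reaches the provider branch purely through the Radio value passed into generate_topics / generate_points by the existing .click bindings, which sit elsewhere in app.py and are not part of this diff. A hypothetical wiring sketch (generate_topics_button, scenario_input, eng_level_input and topic_options are illustrative names only):

```python
# Illustrative only; the real component names and bindings are defined later in app.py.
generate_topics_button.click(
    fn=generate_topics,
    inputs=[model, max_tokens, sys_content_input, scenario_input,
            eng_level_input, user_generate_topics_prompt],
    outputs=[topic_options],  # a gr.Radio refreshed via gr.update(choices=..., visible=True)
)
```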
 
2198
Give me 10 random topics,
2199
for a paragraph. Just the topics, no explanation, use English language based on eng_level.
2200
  Make sure the vocabulary you use is at eng_level.
2201
+ Funnier and more interesting topics are better.
2202
+ such as my favorite cartoon/anime, my favorite food, my favorite movie, etc.
2203
+ but not always about yourself. Maybe like "Games at school" or "Being a vegetarian".
2204
+ You can give some topics that start with My ..., but don't use the same prefix for every topic.
2205
  output use JSON
2206
  EXAMPLE:
2207
  "topics":["topic1", "topic2", "topic3", "topic4", "topic5", "topic6", "topic7", "topic8", "topic9", "topic10"]
 
2249
  Please provide main points to develop in a paragraph about topic in the context of scenario,
2250
  use simple English language and make sure the vocabulary you use is at eng_level.
2251
No more explanation, and do not develop these points into a simple paragraph.
2252
+ Give at least 5 points/keywords, enough to complete the paragraph.
2253
  Output use JSON format
2254
  EXAMPLE:
2255
  {{
2256
+ "points":["point1", "point2", "point3", "point4", "point5"]
2257
  }}
2258
  """
2259
  user_generate_points_prompt = gr.Textbox(label="Points Prompt", value=default_generate_points_prompt, visible=False)
 
2725
 
2726
  Final Output JSON Format:
2727
  {{
2728
+ "content": {{content's dict}},
2729
+ "organization": {{organization'dict}},
2730
+ "grammar_and_usage": {{grammar_and_usage'dict}},
2731
+ "vocabulary": {{vocabulary'dict}},
2732
+ "coherence_and_cohesion": {{coherence_and_cohesion'dict}}
2733
  }}
2734
  """
2735
  user_generate_paragraph_evaluate_prompt = gr.Textbox(label="Paragraph evaluate Prompt", value=default_user_generate_paragraph_evaluate_prompt, visible=False)
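For reference, a purely illustrative shape of the final evaluation JSON this prompt asks for; the per-criterion fields are assumptions, since the part of the prompt that defines each dict is outside this diff:

```python
# Illustrative only; the "score" / "feedback" keys are assumed, not taken from the prompt.
example_evaluation = {
    "content": {"score": 4, "feedback": "..."},
    "organization": {"score": 3, "feedback": "..."},
    "grammar_and_usage": {"score": 4, "feedback": "..."},
    "vocabulary": {"score": 3, "feedback": "..."},
    "coherence_and_cohesion": {"score": 4, "feedback": "..."},
}
```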
 
3064
  with gr.Column():
3065
  generate_full_paragraph_refine_button = gr.Button("✨ JUTOR 段落改善建議", variant="primary")
3066
  full_paragraph_refine_output_table = gr.DataFrame(label="段落改善建議", wrap=True, interactive=False)
3067
+ full_paragraph_refine_output = gr.HTML(label="修改建議", visible=False)
3068
  gr.Markdown("## 修改結果")
3069
  full_paragraph_refine_output_diff = gr.HTML()
3070
 
 
3609
  chinese_full_paragraph_refine_input = gr.TextArea(label="這是你的原始寫作內容,參考建議,你可以選擇是否修改:", show_copy_button=True)
3610
  with gr.Column():
3611
  with gr.Row():
3612
+ generate_chinese_full_paragraph_refine_button = gr.Button("段落全文分析", variant="primary")
3613
  with gr.Row():
3614
  chinese_full_paragraph_refine_output_text = gr.Markdown(label="段落全文分析")
3615
  with gr.Row():
 
3776
  gr.Markdown("<span style='color:#4e80ee'>作業繳交截止時間</span>")
3777
  chinese_assignment_submission_deadline_history_log = gr.Markdown()
3778
  gr.Markdown("---")
3779
+ gr.Markdown("# 回傳作業容")
3780
 
3781
  with gr.Row():
3782
  with gr.Column():