youngtsai committed on
Commit
5fe8ff1
·
1 Parent(s): 55ea5b2

try operation

Browse files
Files changed (1) hide show
  1. app.py +85 -67
app.py CHANGED
@@ -14,6 +14,7 @@ import tempfile
14
  import urllib.parse
15
  import pandas as pd
16
  import re
 
17
 
18
  # From other files
19
  from storage_service import GoogleCloudStorage
@@ -1241,84 +1242,101 @@ def get_chinese_conversation_thread_id(thread_id):
1241
  thread_id = thread.id
1242
  return thread_id
1243
 
1244
- def get_chinese_paragraph_evaluate_content(thread_id, model, user_content, paragraph):
1245
- # 先做 主題與內容、遣詞造句
1246
- content = generate_content_by_open_ai_assistant(user_content, thread_id, model_name=model)
1247
- print(f"====generate_paragraph_evaluate====")
1248
- print(content)
1249
-
1250
- if "```json" not in content:
1251
- raise gr.Error("網路塞車,或是內容有誤,請稍後重新嘗試!")
1252
-
1253
- content_list = content.split("```json")
1254
- content_text = content_list[0]
1255
- print(f"content_text: {content_text}")
1256
- content_json = content_list[1].split("```")[0]
1257
- print(f"content_json: {content_json}")
1258
 
1259
- data = json.loads(content_json)["results"]
 
 
1260
  headers = ["架構", "評分", "解釋"]
1261
- table_data = [
1262
- ["主題與內容", data['主題與內容']['level'], data['主題與內容']['explanation']],
1263
- ["遣詞造句", data['遣詞造句']['level'], data['遣詞造句']['explanation']],
1264
- ]
1265
-
1266
- # 段落結構
1267
- paragraph_structure_content = generate_content_by_open_ai_assistant_structure_robot(paragraph, thread_id=None, model_name=model)
1268
- print(f"paragraph_structure_content: {paragraph_structure_content}")
1269
-
1270
- if "```json" not in paragraph_structure_content:
1271
- raise gr.Error("網路塞車,或是內容有誤,請稍後重新嘗試!")
1272
-
1273
- paragraph_structure_content_list = paragraph_structure_content.split("```json")
1274
- paragraph_structure_content_text = paragraph_structure_content_list[0]
1275
- paragraph_structure_content_json = paragraph_structure_content_list[1].split("```")[0]
1276
- paragraph_structure_content_table = json.loads(paragraph_structure_content_json)["results"]["段落結構"]
1277
- paragraph_structure_table_data = [
1278
- ["段落結構", paragraph_structure_content_table['level'], paragraph_structure_content_table['explanation']],
1279
- ]
1280
 
1281
- # 合併 table_data
1282
- table_data.extend(paragraph_structure_table_data)
1283
- content_text = content_text + "\n" + paragraph_structure_content_text
1284
-
1285
- # 挑錯字
1286
- spelling_content = generate_content_by_open_ai_assistant_spelling_robot(paragraph, thread_id=None, model_name=model)
1287
- print(f"spelling_content: {spelling_content}")
 
 
 
 
 
 
 
 
 
 
 
 
1288
 
1289
- if "```json" not in spelling_content:
1290
- raise gr.Error("網路塞車,或是內容有誤,請稍後重新嘗試!")
1291
-
1292
- spelling_content_list = spelling_content.split("```json")
1293
- spelling_content_text = spelling_content_list[0]
1294
- spelling_content_json = spelling_content_list[1].split("```")[0]
1295
- spelling_content_table = json.loads(spelling_content_json)["results"]["錯別字"]
1296
- spelling_table_data = [
1297
- ["錯別字", spelling_content_table['level'], spelling_content_table['explanation']],
1298
- ]
 
 
 
 
 
 
1299
 
1300
- # ========= 合併 =========
1301
- table_data.extend(spelling_table_data)
1302
- content_text = content_text + "\n" + spelling_content_text
 
 
 
 
 
 
 
 
 
 
 
 
 
1303
 
1304
  # 綜合評分
1305
- grade = assign_grade(
1306
- data['主題與內容']['level'],
1307
- paragraph_structure_content_table['level'],
1308
- data['遣詞造句']['level'],
1309
- spelling_content_table['level']
1310
- )
1311
- grade_content_text = f"# 綜合評分:{grade}"
1312
- total_content_text = grade_content_text + "\n" + content_text
 
1313
 
1314
- # 綜合回饋
1315
- feedback_match = re.search(r"綜合回饋(.*?)評分標準與回饋", content_text, re.DOTALL)
1316
- feedback_text = feedback_match.group(1).strip() if feedback_match else ""
1317
- table_data.append(["綜合評分", grade, feedback_text])
 
 
 
 
 
 
 
1318
 
1319
  content_table = gr.update(value=table_data, headers=headers, visible=True)
1320
 
1321
- return total_content_text, content_table
1322
 
1323
  def get_chinese_paragraph_1st_evaluate_content(
1324
  thread_id,
 
14
  import urllib.parse
15
  import pandas as pd
16
  import re
17
+ import time
18
 
19
  # From other files
20
  from storage_service import GoogleCloudStorage
 
1242
  thread_id = thread.id
1243
  return thread_id
1244
 
1245
+ def retry_operation(operation, max_attempts=3, delay=2):
1246
+ for attempt in range(max_attempts):
1247
+ try:
1248
+ return operation()
1249
+ except Exception as e:
1250
+ if attempt == max_attempts - 1:
1251
+ raise e
1252
+ time.sleep(delay)
 
 
 
 
 
 
1253
 
1254
def _evaluate_json_reply(fetch):
    """Run *fetch* with retries and split its reply into prose + parsed JSON.

    *fetch* is a zero-argument callable returning a raw assistant reply that
    must contain a ```json fenced block. Retries (via retry_operation) when
    the fence marker is missing. Parses the JSON *before* returning so a
    malformed payload contributes nothing to the caller's output.

    Returns:
        Tuple of (text before the fence, parsed ``results`` dict).

    Raises:
        ValueError: If the reply never contains a ```json fence.
        json.JSONDecodeError / KeyError: If the fenced payload is malformed.
    """
    def operation():
        content = fetch()
        if "```json" not in content:
            raise ValueError("回應格式不正確")
        return content

    content = retry_operation(operation)
    prose, _, remainder = content.partition("```json")
    payload = remainder.split("```")[0]
    return prose, json.loads(payload)["results"]


def get_chinese_paragraph_evaluate_content(thread_id, model, user_content, paragraph):
    """Evaluate a Chinese paragraph on three axes and build a score table.

    Runs three assistant evaluations — theme & wording, paragraph structure,
    spelling — each with retries, collects their rows, and when all four rows
    are present computes an overall grade plus extracted feedback. Failures
    of individual evaluations are aggregated into the returned text instead
    of aborting the whole evaluation.

    Args:
        thread_id: Assistant thread id used for the theme/wording call.
        model: Model name forwarded to every assistant call.
        user_content: Prompt for the theme/wording evaluation.
        paragraph: Raw paragraph text for the structure and spelling checks.

    Returns:
        Tuple of (markdown result text, ``gr.update`` for the score table).
    """
    content_text = ""
    table_data = []
    headers = ["架構", "評分", "解釋"]
    error_messages = []

    # 主題與內容、遣詞造句 — one reply yields two table rows.
    try:
        prose, data = _evaluate_json_reply(
            lambda: generate_content_by_open_ai_assistant(user_content, thread_id, model_name=model)
        )
        content_text = prose
        table_data.extend([
            ["主題與內容", data['主題與內容']['level'], data['主題與內容']['explanation']],
            ["遣詞造句", data['遣詞造句']['level'], data['遣詞造句']['explanation']],
        ])
    except Exception as e:
        error_messages.append(f"主題與內容、遣詞造句評估出錯: {str(e)}")

    # 段落結構
    try:
        prose, results = _evaluate_json_reply(
            lambda: generate_content_by_open_ai_assistant_structure_robot(paragraph, thread_id=None, model_name=model)
        )
        structure_data = results["段落結構"]
        content_text += "\n" + prose
        table_data.append(["段落結構", structure_data['level'], structure_data['explanation']])
    except Exception as e:
        error_messages.append(f"段落結構評估出錯: {str(e)}")

    # 錯別字
    try:
        prose, results = _evaluate_json_reply(
            lambda: generate_content_by_open_ai_assistant_spelling_robot(paragraph, thread_id=None, model_name=model)
        )
        spelling_data = results["錯別字"]
        content_text += "\n" + prose
        table_data.append(["錯別字", spelling_data['level'], spelling_data['explanation']])
    except Exception as e:
        error_messages.append(f"錯別字檢查出錯: {str(e)}")

    # 綜合評分 — only meaningful when all three evaluations produced
    # their rows (2 + 1 + 1 = 4).
    if len(table_data) == 4:
        grade = assign_grade(
            table_data[0][1],  # 主題與內容
            table_data[2][1],  # 段落結構
            table_data[1][1],  # 遣詞造句
            table_data[3][1],  # 錯別字
        )
        content_text = f"# 綜合評分:{grade}" + "\n" + content_text

        # 綜合回饋 — pull the feedback paragraph out of the assistant prose.
        feedback_match = re.search(r"綜合回饋(.*?)評分標準與回饋", content_text, re.DOTALL)
        feedback_text = feedback_match.group(1).strip() if feedback_match else "無法提取綜合回饋"
        table_data.append(["綜合評分", grade, feedback_text])
    else:
        error_messages.append("無法進行綜合評分,因為部分評估未完成")

    # Surface any per-evaluation failures at the top of the output.
    if error_messages:
        error_content = "\n".join(error_messages)
        content_text = f"評估過程中遇到以下問題:\n{error_content}\n\n" + content_text

    content_table = gr.update(value=table_data, headers=headers, visible=True)

    return content_text, content_table
1340
 
1341
  def get_chinese_paragraph_1st_evaluate_content(
1342
  thread_id,