DeepLearning101 committed
Commit eee5b94 · verified · 1 Parent(s): 71e3d07

Update app.py

Files changed (1):
  1. app.py +118 -51
app.py CHANGED
@@ -1,14 +1,24 @@
  import gradio as gr
  import aiohttp
  import asyncio
- import json, os

  LLM_API = os.environ.get("LLM_API")
  LLM_URL = os.environ.get("LLM_URL")

- USER_ID = "HuggingFace Space" # Placeholder user ID

- async def send_chat_message(LLM_URL, LLM_API, user_input):
      payload = {
          "inputs": {},
          "query": user_input,
@@ -16,55 +26,87 @@ async def send_chat_message(LLM_URL, LLM_API, user_input):
          "conversation_id": "",
          "user": USER_ID,
      }
-     print("Sending chat message payload:", payload) # Debug information

      async with aiohttp.ClientSession() as session:
-         async with session.post(
-             url=f"{LLM_URL}/chat-messages",
-             headers={"Authorization": f"Bearer {LLM_API}"},
-             json=payload,
-             timeout=aiohttp.ClientTimeout(total=60)
-         ) as response:
-             if response.status != 200:
-                 print(f"Error: {response.status}")
-                 return f"Error: {response.status}"
-
-             # Handle the stream of events
-             full_response = []
-             async for line in response.content:
-                 line = line.decode('utf-8').strip()
-                 if not line:
-                     continue
-                 if "data: " not in line:
-                     continue
-                 try:
-                     print("Received line:", line) # Debug information
-                     data = json.loads(line.split("data: ")[1])
-                     if "answer" in data:
-                         full_response.append(data["answer"])
-                 except (IndexError, json.JSONDecodeError) as e:
-                     print(f"Error parsing line: {line}, error: {e}") # Debug information
-                     continue
-
-             if full_response:
-                 return ''.join(full_response).strip()
-             else:
-                 return "Error: No thought found in the response"

  async def handle_input(user_input):
-     chat_response = await send_chat_message(LLM_URL, LLM_API, user_input)
-     print("Chat response:", chat_response) # Debug information
      return chat_response

  def run_sync(user_input):
      return asyncio.run(handle_input(user_input))

- # Define Gradio interface
- user_input = gr.Textbox(label='歡迎問我關於「高熵合金」(High-entropy alloys) 的各種疑難雜症')
- examples = [
-     ["AlCoCrFeNi HEA coating 可用怎樣的實驗方法做到 ?"],
-     ["請問high entropy nitride coatings的形成,主要可透過那些元素來熱這個材料形成熱穩定?"]
- ]

  TITLE = """<h1 align="center">Large Language Model (LLM) Playground 💬 <a href='https://support.maicoin.com/zh-TW/support/home' target='_blank'>Cryptocurrency Exchange FAQ</a></h1>"""
  SUBTITLE = """<h2 align="center"><a href='https://www.twman.org' target='_blank'>TonTon Huang Ph.D. @ 2024/06 </a><br></h2>"""
@@ -72,16 +114,41 @@ LINKS = """<a href='https://blog.twman.org/2021/04/ASR.html' target='_blank'>那
  <a href='https://blog.twman.org/2023/07/wsl.html' target='_blank'>用PaddleOCR的PPOCRLabel來微調醫療診斷書和收據</a> | <a href='https://blog.twman.org/2023/07/HugIE.html' target='_blank'>基於機器閱讀理解和指令微調的統一信息抽取框架之診斷書醫囑資訊擷取分析</a><br>
  <a href='https://huggingface.co/spaces/DeepLearning101/High-Entropy-Alloys-FAQ/blob/main/reference.txt' target='_blank'>「高熵合金」(High-entropy alloys) 參考論文</a><br>"""

- with gr.Blocks() as iface:
      gr.HTML(TITLE)
      gr.HTML(SUBTITLE)
      gr.HTML(LINKS)
-     gr.Interface(
-         fn=run_sync,
-         inputs=user_input,
-         outputs="text",
-         examples=examples,
-         allow_flagging="never"
      )

- iface.launch()
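
The streaming loop removed above (and re-added in the new version of send_chat_message below) consumes Server-Sent-Events-style lines from the /chat-messages endpoint and concatenates every "answer" field. The standalone sketch below replays that parsing logic on hard-coded sample lines; the sample payloads are assumptions, with only the "answer" key taken from app.py.

# Standalone sketch of the 'data: ...' line parsing used in send_chat_message.
# The sample lines are illustrative; the real stream comes from response.content.
import json

sample_lines = [
    'data: {"event": "message", "answer": "High-entropy alloys combine "}',
    'data: {"event": "message", "answer": "five or more principal elements."}',
    '',                      # blank keep-alive lines are skipped
    'data: [DONE]',          # non-JSON payloads are skipped by the except clause
]

full_response = []
for raw in sample_lines:
    line = raw.strip()
    if not line or "data: " not in line:
        continue
    try:
        data = json.loads(line.split("data: ")[1])
    except (IndexError, json.JSONDecodeError):
        continue
    if "answer" in data:
        full_response.append(data["answer"])

print("".join(full_response).strip())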
+ import os
  import gradio as gr
  import aiohttp
  import asyncio
+ import json
+ from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
+ from huggingface_hub import HfApi, HfFolder

+ HF_API_TOKEN = os.environ.get("HF_API_TOKEN")
  LLM_API = os.environ.get("LLM_API")
  LLM_URL = os.environ.get("LLM_URL")
+ USER_ID = "HuggingFace Space"
+ HfFolder.save_token(HF_API_TOKEN)
+ DATASET_NAME = os.environ.get("DATASET_NAME")

+ try:
+     dataset = load_dataset(DATASET_NAME)
+ except Exception:
+     dataset = DatasetDict({"feedback": Dataset.from_dict({"user_input": [], "response": [], "feedback_type": [], "improvement": []})})

+ async def send_chat_message(user_input):
      payload = {
          "inputs": {},
          "query": user_input,
          "conversation_id": "",
          "user": USER_ID,
      }
+     print("Sending chat message payload:", payload)

      async with aiohttp.ClientSession() as session:
+         try:
+             async with session.post(
+                 url=f"{LLM_URL}/chat-messages",
+                 headers={"Authorization": f"Bearer {LLM_API}"},
+                 json=payload,
+                 timeout=aiohttp.ClientTimeout(total=60)
+             ) as response:
+                 if response.status != 200:
+                     print(f"Error: {response.status}")
+                     return f"Error: {response.status}"
+
+                 full_response = []
+                 async for line in response.content:
+                     line = line.decode('utf-8').strip()
+                     if not line:
+                         continue
+                     if "data: " not in line:
+                         continue
+                     try:
+                         data = json.loads(line.split("data: ")[1])
+                         if "answer" in data:
+                             full_response.append(data["answer"])
+                     except (IndexError, json.JSONDecodeError) as e:
+                         print(f"Error parsing line: {line}, error: {e}")
+                         continue
+
+                 if full_response:
+                     return ''.join(full_response).strip()
+                 else:
+                     return "Error: No thought found in the response"
+         except Exception as e:
+             print(f"Exception: {e}")
+             return f"Exception: {e}"

  async def handle_input(user_input):
+     print(f"Handling input: {user_input}")
+     chat_response = await send_chat_message(user_input)
+     print("Chat response:", chat_response)
      return chat_response

  def run_sync(user_input):
+     print(f"Running sync with input: {user_input}")
      return asyncio.run(handle_input(user_input))

+ def save_feedback(user_input, response, feedback_type, improvement):
+     feedback = {
+         "user_input": user_input,
+         "response": response,
+         "feedback_type": feedback_type,
+         "improvement": improvement
+     }
+     print(f"Saving feedback: {feedback}")
+     # Append the new row to the feedback split and push the dataset to the Hub
+     new_data = {"user_input": user_input, "response": response, "feedback_type": feedback_type, "improvement": improvement}
+     global dataset
+     dataset["feedback"] = dataset["feedback"].add_item(new_data)
+     dataset.push_to_hub(DATASET_NAME)
+
+ def handle_feedback(response, feedback_type, improvement):
+     feedback = {
+         "response": response,
+         "feedback_type": feedback_type,
+         "improvement": improvement
+     }
+     save_feedback("", response, feedback_type, improvement)  # user_input is not captured at feedback time
+     return "Thank you for your feedback!"
+
+ def handle_user_input(user_input):
+     print(f"User input: {user_input}")
+     return run_sync(user_input)
+
+ # Read back the stored feedback so it can be displayed in the UI
+ def show_feedback():
+     try:
+         feedbacks = dataset["feedback"].to_pandas().to_dict(orient="records")
+         return feedbacks
+     except Exception as e:
+         return f"Error: {e}"

  TITLE = """<h1 align="center">Large Language Model (LLM) Playground 💬 <a href='https://support.maicoin.com/zh-TW/support/home' target='_blank'>Cryptocurrency Exchange FAQ</a></h1>"""
  SUBTITLE = """<h2 align="center"><a href='https://www.twman.org' target='_blank'>TonTon Huang Ph.D. @ 2024/06 </a><br></h2>"""
  <a href='https://blog.twman.org/2023/07/wsl.html' target='_blank'>用PaddleOCR的PPOCRLabel來微調醫療診斷書和收據</a> | <a href='https://blog.twman.org/2023/07/HugIE.html' target='_blank'>基於機器閱讀理解和指令微調的統一信息抽取框架之診斷書醫囑資訊擷取分析</a><br>
  <a href='https://huggingface.co/spaces/DeepLearning101/High-Entropy-Alloys-FAQ/blob/main/reference.txt' target='_blank'>「高熵合金」(High-entropy alloys) 參考論文</a><br>"""

+ iface = gr.Blocks()
+
+ with iface:
      gr.HTML(TITLE)
      gr.HTML(SUBTITLE)
      gr.HTML(LINKS)
+     with gr.Row():
+         user_input = gr.Textbox(label='歡迎問我關於「高熵合金」(High-entropy alloys) 的各種疑難雜症', lines=2, placeholder="在此輸入問題...")
+         submit_button = gr.Button("提交")
+     with gr.Row():
+         response_output = gr.Textbox(label='模型回應', interactive=False)
+     with gr.Row():
+         like_button = gr.Button("👍")
+         dislike_button = gr.Button("👎")
+         improvement_input = gr.Textbox(label='改進建議', placeholder='請輸入如何改進模型回應的建議...')
+     with gr.Row():
+         feedback_output = gr.Textbox(label='反饋結果', interactive=False)
+     with gr.Row():
+         show_feedback_button = gr.Button("查看所有反饋")
+         feedback_display = gr.JSON(label='所有反饋')
+
+     submit_button.click(fn=handle_user_input, inputs=user_input, outputs=response_output)
+
+     like_button.click(
+         fn=lambda response, improvement: handle_feedback(response, "like", improvement),
+         inputs=[response_output, improvement_input],
+         outputs=feedback_output
      )

+     dislike_button.click(
+         fn=lambda response, improvement: handle_feedback(response, "dislike", improvement),
+         inputs=[response_output, improvement_input],
+         outputs=feedback_output
+     )
+
+     show_feedback_button.click(fn=show_feedback, outputs=feedback_display)
+
+ iface.launch()
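
For completeness, a minimal sketch of reading the pushed feedback rows back from the Hub, assuming DATASET_NAME points at the dataset repository that save_feedback() pushes to; the repository id below is a placeholder, not the Space's actual value.

# Minimal sketch: load the "feedback" split pushed by save_feedback().
# "your-username/hea-faq-feedback" is a hypothetical repo id; substitute DATASET_NAME.
from datasets import load_dataset

feedback = load_dataset("your-username/hea-faq-feedback", split="feedback")
for row in feedback:
    print(row["feedback_type"], "|", row["user_input"], "->", row["improvement"])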