Update app.py
app.py CHANGED

@@ -1,64 +1,307 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
+import json
+import base64
+from io import BytesIO
+from PIL import Image
+from vis_python_exe import PythonExecutor
+from openai import OpenAI
+import markdown
+
+def encode_image(image):
+    """
+    Convert a PIL.Image object or an image file path into a base64-encoded string.
+
+    Args:
+        image: a PIL.Image object or a path to an image file
+
+    Returns:
+        the base64-encoded string
+    """
+    if isinstance(image, str):
+        # The argument is a file path
+        with open(image, "rb") as image_file:
+            return base64.b64encode(image_file.read()).decode('utf-8')
+    else:
+        # The argument is a PIL.Image object; in-memory images can have
+        # image.format == None, so fall back to PNG in that case
+        buffered = BytesIO()
+        image.save(buffered, format=image.format or 'PNG')
+        return base64.b64encode(buffered.getvalue()).decode('utf-8')
+
+def excute_codes(codes, messages, executor: PythonExecutor):
+    no_code_idx = []
+    codes_use = []
+    for i, code in enumerate(codes):
+        if code == "":
+            no_code_idx.append(i)
+        else:
+            codes_use.append(code)
+    batch_results = executor.batch_apply(codes_use, messages)
+    return batch_results, no_code_idx
+
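+# Note (assumption, not part of the original diff): PythonExecutor.batch_apply
+# is expected to return one (output, report) pair per executed snippet, where
+# output may carry 'text' and 'images' entries; evaluate_single_data below
+# unpacks exe_result on that basis.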
+def process_prompt_init(question, image, prompt_template, prompt_type):
+    prompt_prefix = prompt_template[prompt_type]
+
+    image_base64 = encode_image(image)
+    question_with_options = question
+
+    messages = [
+        {
+            "role": "user",
+            "content": [{"type": "text", "text": "<image_clue_0>"}] + [{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"}}] + [{"type": "text", "text": "</image_clue_0>\n\n"}] + [{"type": "text", "text": prompt_prefix.format(query=question_with_options)}]
+        }
+    ]
+
+    return messages
+
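+# The uploaded image is wrapped in <image_clue_0>...</image_clue_0> text markers
+# so the model can refer to it by index; images produced by code execution are
+# tagged with increasing indices (image_clue_1, image_clue_2, ...) below.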
+def update_messages_with_excu_content(messages, images_result, text_result, image_clue_idx):
+    new_messages = []
+    image_content = []
+    for message_item in messages[:-1]:
+        new_messages.append(message_item)
+
+    assistant_message_item = messages[-1]['content']
+    interpreter_message_text_prefix = [{"type": "text", "text": f"<interpreter>\nText Result:\n{text_result}\nImage Result:\n"}]
+    if images_result is not None:
+        for image_base64_item in images_result:
+            interpreter_message_images = [{"type": "text", "text": f"<image_clue_{image_clue_idx}>"}] + [{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_base64_item}"}}] + [{"type": "text", "text": f"</image_clue_{image_clue_idx}>"}]
+            image_content += interpreter_message_images
+            image_clue_idx += 1
+    else:
+        image_content = [{"type": "text", "text": "None"}]
+    interpreter_message_text_profill = [{"type": "text", "text": "</interpreter>\n"}]
+
+    assistant_message_item = assistant_message_item + interpreter_message_text_prefix + image_content + interpreter_message_text_profill
+    new_messages.append({"role": "assistant", "content": assistant_message_item})
+    return new_messages, image_clue_idx
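+# Execution results are returned to the model inside <interpreter>...</interpreter>
+# markers appended to its last turn, mirroring the <code>...</code> markers the
+# model itself emits; the two helpers below record the model's code and text turns.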
+
+
+def update_messages_with_code(messages, generated_content):
+    message_item = {
+        "role": "assistant",
+        "content": [{"type": "text", "text": f"{generated_content}</code>\n"}]
+    }
+
+    messages.append(message_item)
+    return messages
+
+
+def update_messages_with_text(messages, generated_content):
+    message_item = {
+        "role": "assistant",
+        "content": [{"type": "text", "text": f"{generated_content}"}]
+    }
+
+    messages.append(message_item)
+    return messages
+
+def call_chatgpt_api(messages, client, max_tokens=10000, stop=None, temperature=1.1):
+    """Call the ChatGPT API with the given messages."""
+    try:
+        response = client.chat.completions.create(
+            model="gpt-4.1",  # a vision-capable model
+            messages=messages,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            top_p=1.0,
+            stop=stop
+        )
+
+        response_text = response.choices[0].message.content
+
+        # Determine which stop marker, if any, ended the generation
+        stop_reason = None
+        if stop and any(s in response_text for s in stop):
+            for s in stop:
+                if s in response_text:
+                    stop_reason = s
+                    break
+        else:
+            stop_reason = response.choices[0].finish_reason
+
+        if "<code>" in response_text:
+            stop_reason = "</code>"
+
+        return response_text, stop_reason
+
+    except Exception as e:
+        print(f"API Error: {str(e)}")
+        return None, None
+
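+# Note: the chat completions endpoint omits a matched stop sequence from the
+# returned text, so the check above falls back to detecting the opening
+# "<code>" tag; update_messages_with_code then re-appends the "</code>" marker.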
+def evaluate_single_data(data, client, executor, prompt_template, prompt_type):
+    messages = process_prompt_init(data["question"], data['image'], prompt_template, prompt_type)
+
+    # Generate the initial response
+    response_text, pred_stop_reason = call_chatgpt_api(
+        messages,
+        client,
+        max_tokens=10000,
+        stop=["</code>"]
+    )
+
+    if response_text is None:
+        print("Failed to get response from API")
+        return {
+            "input": data["question"],
+            "output": data.get("answer"),
+            "prediction": {
+                "solution": "API Error",
+                "correctness": False,
+                "code_execution_count": 0,
+            }
+        }
+
+    # Process the response
+    code_execution_count = 0
+    image_clue_idx = 1
+
+    while True:
+        # If generation stopped at </code>, execute the generated code
+        if pred_stop_reason == "</code>":
+            # Record the code turn and extract the code to execute
+            messages = update_messages_with_code(messages, response_text)
+            code_to_execute = response_text.split("```python")[-1].split("```")[0].strip()
+
+            # Execute the code
+            exe_result = excute_codes([code_to_execute], messages, executor)[0][0]
+            if exe_result is None:
+                text_result = "None"
+                images_result = None
+            else:
+                output, report = exe_result
+                try:
+                    text_result = output['text']
+                except (TypeError, KeyError):
+                    text_result = None
+                try:
+                    images_result = output['images']
+                except (TypeError, KeyError):
+                    images_result = None
+
+            messages, image_clue_idx = update_messages_with_excu_content(messages, images_result, text_result, image_clue_idx)
+
+            code_execution_count += 1
+
+            # Generate the next part of the response
+            response_text, pred_stop_reason = call_chatgpt_api(
+                messages,
+                client,
+                max_tokens=10000,
+                stop=["</code>"]
+            )
+        else:
+            messages = update_messages_with_text(messages, response_text)
+            break
+
+    return messages
+
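+# Generation loop summary: the model writes a ```python ...``` block inside
+# <code> tags, generation stops at </code>, the snippet runs in PythonExecutor,
+# its text/image output is appended as an <interpreter> turn, and generation
+# resumes; a response containing no code ends the loop.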
+def process_message(messages):
+    # Build the HTML output; the wrapper div forces black text throughout
+    html_output = '<div style="color: black;">'
+
+    for message_item in messages:
+        role = message_item['role']
+        content = message_item['content']
+
+        # Style each turn according to its role
+        if role == "user" or role == "human":
+            html_output += '<div style="background-color: #f0f0f0; padding: 10px; margin: 10px 0; border-radius: 10px; color: black;"><strong>User:</strong><br>'
+        elif role == "assistant":
+            html_output += '<div style="background-color: #e6f7ff; padding: 10px; margin: 10px 0; border-radius: 10px; color: black;"><strong>Assistant:</strong><br>'
+        else:
+            html_output += f'<div style="background-color: #f9f9f9; padding: 10px; margin: 10px 0; border-radius: 10px; color: black;"><strong>{role.capitalize()}:</strong><br>'
+
+        # Render each content item
+        for content_item in content:
+            content_type = content_item['type']
+
+            if content_type == "text":
+                # Convert Markdown text to HTML
+                md_text = content_item['text']
+                html_text = markdown.markdown(md_text, extensions=['fenced_code', 'codehilite'])
+                html_output += f'<div style="color: black;">{html_text}</div>'
+
+            elif content_type == "image_url":
+                # Base64 data URLs and plain URLs render the same way
+                content_value = content_item['image_url']['url']
+                html_output += f'<img src="{content_value}" style="max-width: 100%; margin: 10px 0;">'
+
+        html_output += '</div>'
+
+    html_output += '</div>'  # close the outer wrapper
+    return html_output
+
+def o3_chat(api_key, base_url, question, image):
+    # Initialize the components
+    client = OpenAI(api_key=api_key, base_url=base_url)
+    executor = PythonExecutor()
+
+    with open("./vis_python_template.json", "r", encoding="utf-8") as f:
+        prompt_template = json.load(f)
+    prompt_type = 'vistool'
+
+    data = {
+        "question": question,
+        "image": image,
+    }
+
+    # Evaluate the single data point and render the conversation
+    messages = evaluate_single_data(data, client, executor, prompt_template, prompt_type)
+    html_output = process_message(messages)
+    return html_output
+
+# Gradio interface
+def create_demo():
+    with gr.Blocks(css="footer {visibility: hidden}") as demo:
+        gr.Markdown("# O3 Visual Python Interpreter")
+        gr.Markdown("Upload an image and ask a question to get a response with code execution capabilities.")
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                api_key = gr.Textbox(label="OpenAI API Key", type="password")
+                base_url = gr.Textbox(label="Base URL (optional)", value="https://api.openai.com/v1")
+                image_input = gr.Image(type="pil", label="Upload Image")
+                question = gr.Textbox(label="Question", placeholder="Ask a question about the image...")
+                submit_btn = gr.Button("Submit")
+
+            with gr.Column(scale=2):
+                output = gr.HTML(label="Response")
+
+        submit_btn.click(
+            fn=o3_chat,
+            inputs=[api_key, base_url, question, image_input],
+            outputs=output
+        )
+
+        gr.Markdown("""
+        ## Examples
+        Try asking questions like:
+        - "What's in this image?"
+        - "Can you analyze the data in this chart?"
+        - "Generate a similar visualization with Python"
+        """)
+
+    return demo
+
+# Create and launch the app
 if __name__ == "__main__":
+    demo = create_demo()
     demo.launch()
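o3_chat reads ./vis_python_template.json, which is not included in this diff. Since process_prompt_init looks up prompt_template['vistool'] and calls .format(query=...) on it, the file is presumably a JSON object mapping prompt-type names to prompt strings containing a {query} placeholder, roughly like this hypothetical sketch:

    {
        "vistool": "Answer the question about the image given between <image_clue_0> and </image_clue_0>. You may write Python inside <code>```python ... ```</code>; the execution result will be returned inside <interpreter> tags. Question: {query}"
    }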