# (Hugging Face Spaces page-scrape residue removed: "Spaces: Sleeping Sleeping")
import os

import gradio as gr
import pandas as pd
from openai import OpenAI

# API key is read from the environment at import time; OpenAI() does not
# validate it here — a missing/invalid key only fails on the first request.
OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
client = OpenAI(api_key=OPEN_AI_KEY)
def process_file(file):
    """Read an uploaded CSV/Excel file and build the UI update values.

    Parameters:
        file: uploaded file object from ``gr.File`` (has a ``.name`` path).

    Returns:
        A 4-tuple ``(q1, q2, q3, df_string)``: three suggested-question
        strings for the buttons and the DataFrame rendered as text.
    """
    # Dispatch on extension; anything not ending in .csv is treated as Excel.
    # Lower-cased so ".CSV" uploads are not mis-read as Excel.
    if file.name.lower().endswith('.csv'):
        df = pd.read_csv(file)
    else:
        df = pd.read_excel(file)
    df_string = df.to_string()
    # Generate suggested questions from the uploaded data.
    questions = generate_questions(df_string)
    # Pad to exactly three entries so every button output gets a string,
    # regardless of how many questions came back.
    padded = (list(questions) + ["", "", ""])[:3]
    return padded[0], padded[1], padded[2], df_string
def update_buttons(df_string):
    """Regenerate the three suggested-question button labels.

    Always returns exactly three strings — the original ``questions[:3]``
    could yield fewer, leaving some of the three wired outputs unfed.
    """
    questions = generate_questions(df_string)
    # Pad with empty strings so callers always receive three values.
    return (list(questions) + ["", "", ""])[:3]
def generate_questions(df_string):
    """Ask the model for three questions about the uploaded data.

    Parameters:
        df_string: the uploaded DataFrame rendered as text.

    Returns:
        A list of question strings parsed from the model's JSON reply;
        falls back to placeholder questions if parsing fails.
    """
    import json  # local import keeps this fix self-contained

    # Bug fix: original f-string referenced the undefined name
    # ``df_string_output`` (the parameter is ``df_string``) -> NameError.
    sys_content = f"你是一個資料分析師,請用 {df_string} 為資料進行對話,使用 zh-TW"
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": "請根據 {df_string_output} 生成三個問題,並用 JSON 格式返回 questions:[q1, q2, q3]"}
    ]
    print("=====messages=====")
    print(messages)
    print("=====messages=====")
    request_payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "max_tokens": 2000,
        # Forces the model to emit a single JSON object we can parse below.
        "response_format": {"type": "json_object"},
    }
    json_response = client.chat.completions.create(**request_payload)
    print("=====json_response=====")
    print(json_response)
    # Bug fix: the response was printed but never parsed; placeholders were
    # always returned. Parse the JSON payload, with a safe fallback.
    try:
        content = json_response.choices[0].message.content
        questions = json.loads(content).get("questions", [])
    except (json.JSONDecodeError, AttributeError, IndexError, KeyError, TypeError):
        questions = []
    if not questions:
        questions = ["问题 1", "问题 2", "问题 3"]  # placeholder fallback
    return questions
def send_question(question, df_string_output, chat_history):
    """Forward a clicked suggested question into the normal chat flow.

    Equivalent to typing *question* into the message box and pressing Send.
    """
    return respond(question, df_string_output, chat_history)
def respond(user_message, df_string_output, chat_history):
    """Send the user's message to the model and extend the chat history.

    The raw data text is injected as the system prompt so the model answers
    questions about the uploaded file (replies in zh-TW).

    Returns:
        ``("", history)`` — the empty string clears the input textbox, and
        *history* is the chat history with the new (user, assistant) pair.
    """
    print("=== 變數:user_message ===")
    print(user_message)
    print("=== 變數:chat_history ===")
    print(chat_history)

    sys_content = f"你是一個資料分析師,請用 {df_string_output} 為資料進行對話,使用 zh-TW"
    messages = [
        {"role": "system", "content": sys_content},
        {"role": "user", "content": user_message},
    ]
    print("=====messages=====")
    print(messages)
    print("=====messages=====")

    request_payload = {
        "model": "gpt-4-1106-preview",
        "messages": messages,
        "max_tokens": 2000,  # generous cap; adjust as needed
    }
    completion = client.chat.completions.create(**request_payload)
    print(completion)
    answer = completion.choices[0].message.content.strip()

    # Append the new (user, assistant) pair, creating the history list on
    # the first exchange.
    history = [] if chat_history is None else chat_history
    history.append((user_message, answer))
    return "", history
def on_button_click(button, df_string_output, chat_history):
    """Send a suggestion button's label text as the chat question."""
    return respond(button, df_string_output, chat_history)
# ---- Gradio UI -----------------------------------------------------------
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            file_upload = gr.File(label="Upload your file")
            chatbot = gr.Chatbot()
            msg = gr.Textbox(label="Message")
            send_button = gr.Button("Send")
        with gr.Column():
            df_string_output = gr.Textbox(label="raw data")
            with gr.Group():
                gr.Markdown("## 常用问题")
                # Labels are filled in by process_file after an upload.
                btn_1 = gr.Button()
                btn_2 = gr.Button()
                btn_3 = gr.Button()

    # Typing a message and pressing Send runs the chat round-trip.
    send_button.click(
        respond,
        inputs=[msg, df_string_output, chatbot],
        outputs=[msg, chatbot],
    )
    # Uploading a file populates the three suggestion buttons and raw data.
    file_upload.change(
        process_file,
        inputs=file_upload,
        outputs=[btn_1, btn_2, btn_3, df_string_output],
    )
    # Clicking a suggestion button sends its label text as the question.
    btn_1.click(on_button_click, inputs=[btn_1, df_string_output, chatbot], outputs=[msg, chatbot])
    btn_2.click(on_button_click, inputs=[btn_2, df_string_output, chatbot], outputs=[msg, chatbot])
    btn_3.click(on_button_click, inputs=[btn_3, df_string_output, chatbot], outputs=[msg, chatbot])

demo.launch()