import os

import gradio as gr
from openai import OpenAI

# Read the token from the environment; on Spaces, set HF_TOKEN as a secret
# instead of hardcoding it.
HF_TOKEN = os.environ.get("HF_TOKEN", "put_your_token_here")

# OpenAI-compatible client pointed at the Hugging Face Inference Providers router.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=HF_TOKEN,
)
def chat_with_model(user_message):
    """Send a single user message to the model and return its reply."""
    try:
        completion = client.chat.completions.create(
            model="openai/gpt-oss-120b",
            messages=[
                {"role": "user", "content": user_message}
            ],
            max_tokens=200,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error: {e}"
with gr.Blocks(fill_height=True) as demo:
    gr.Markdown("# 💬 Chat with openai/gpt-oss-120b (via HF API)")
    gr.Markdown("Inference runs on Hugging Face servers, not on the Space's own resources.")

    chatbot = gr.Chatbot(height=400)
    user_input = gr.Textbox(label="Type your message")
    send_btn = gr.Button("Send")
    def respond(history, message):
        response = chat_with_model(message)
        history.append((message, response))
        return history, ""

    send_btn.click(respond, [chatbot, user_input], [chatbot, user_input])

demo.launch()
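# Note: this snippet assumes HF_TOKEN is provided by the environment. On a
# Hugging Face Space, add it as a secret in the Space settings rather than
# hardcoding it, and list `openai` (and `gradio`, if the Space SDK does not
# already provide it) in requirements.txt so the imports resolve. Depending on
# the installed Gradio version, gr.Chatbot may warn that the tuple-based history
# format is deprecated in favor of type="messages"; the code above keeps the
# original tuple format.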