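"""Gradio chat demo for the dicta-il/dictalm-7b-instruct model, served through the
Hugging Face Inference API. The API token is read from the HUGGINGFACE_API_KEY
environment variable."""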
import os
import gradio as gr
from huggingface_hub import InferenceClient

model_id = 'dicta-il/dictalm-7b-instruct'

# Create the Hugging Face Inference API client; the token comes from the environment.
api_key = os.getenv('HUGGINGFACE_API_KEY', '')
generator = InferenceClient(model=model_id, token=api_key)
# Text-generation function: generate a reply to the latest user message.
def chat_with_model(history):
    prompt = history[-1]["content"]
    try:
        # text_generation returns the generated text as a plain string by default.
        response = generator.text_generation(
            prompt, do_sample=True, max_new_tokens=64, top_k=40, top_p=0.92, temperature=0.9
        )
        result = response
    except Exception as e:
        result = f"Error: {str(e)}"
    return history + [{"role": "assistant", "content": result}]
# Build the chat interface with Gradio: a chatbot in a clean, modern style.
with gr.Blocks(theme="default") as demo:
    gr.HTML("""
    <div style="background-color: #f5f5f5; padding: 20px; text-align: center;">
        <h1 style="color: #003366; font-family: Arial, sans-serif;">Chat with the DictaLM model</h1>
        <p style="font-family: Arial, sans-serif; color: #333;">Welcome to our interactive chatbot, where you can try out a conversation with an advanced AI model.</p>
    </div>
    """)
    chatbot = gr.Chatbot(label="Chat with the DictaLM model", type="messages")
    with gr.Row():
        user_input = gr.Textbox(placeholder="Enter your message here...", label="", lines=1)
        send_button = gr.Button("Send")
    def user_chat(history, message):
        # Append the user's message to the history and clear the input box.
        return history + [{"role": "user", "content": message}], ""

    # Send the message either by pressing Enter or by clicking the "Send" button.
    user_input.submit(fn=user_chat, inputs=[chatbot, user_input], outputs=[chatbot, user_input], queue=False).then(
        fn=chat_with_model, inputs=chatbot, outputs=chatbot
    )
    send_button.click(fn=user_chat, inputs=[chatbot, user_input], outputs=[chatbot, user_input], queue=False).then(
        fn=chat_with_model, inputs=chatbot, outputs=chatbot
    )

demo.launch()