# ArabianLLM2 / app.py
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
# Load the model and tokenizer
model_name = "silma-ai/SILMA-9B-Instruct-v1.0"
model_name = "rombodawg/Rombos-LLM-V2.5-Qwen-72b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
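# Note (assumption, not part of the original script): a checkpoint of this size
# often will not fit in default fp32 CPU memory. A common alternative is to load
# in a reduced dtype and let `accelerate` place layers automatically, e.g.:
#
#   import torch
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.bfloat16, device_map="auto"
#   )
#
# This requires the `accelerate` package and sufficient GPU memory.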
def generate_response(prompt):
    # Tokenize the prompt, generate a single continuation, and decode it.
    # Note: max_length caps the total length (prompt + generated tokens).
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=200, num_return_sequences=1)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
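# Quick local check (assumes the model loaded successfully); uncomment to try:
#   print(generate_response("Write a one-sentence greeting in Arabic."))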
# Gradio Interface
interface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="SILMA-9B Instruct",
    description="Provide a prompt, and the model generates a response.",
)
interface.launch()
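# Once the app is running, it can also be queried programmatically with the
# gradio_client package (sketch; assumes the default local URL and the default
# "/predict" endpoint that gr.Interface exposes):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("Summarize the history of the Arabic language.", api_name="/predict"))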