chatbot / app.py
import gradio as gr
import torch
from transformers import AutoModel, AutoTokenizer
# Load the model and tokenizer
model_name = "Rafay17/Llama3.2_1b_customModle2"
model = AutoModel.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Define a function to process input text
def generate_output(input_text):
    # Tokenize the input text
    inputs = tokenizer(input_text, return_tensors="pt")
    # Forward pass to get the model outputs (no gradients needed for inference)
    with torch.no_grad():
        outputs = model(**inputs)
    # Return the last hidden state; Gradio renders its string representation in the output Textbox
    return outputs.last_hidden_state
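# Note: AutoModel loads the base transformer without a language-modeling head, so
# generate_output() returns raw hidden states rather than generated text. If readable
# text output is wanted instead, a minimal sketch (an alternative, not part of this app;
# generate_text is a hypothetical name) could load the same checkpoint with
# AutoModelForCausalLM and decode the generated tokens:
#
# from transformers import AutoModelForCausalLM
#
# lm_model = AutoModelForCausalLM.from_pretrained(model_name)
#
# def generate_text(input_text):
#     inputs = tokenizer(input_text, return_tensors="pt")
#     with torch.no_grad():
#         output_ids = lm_model.generate(**inputs, max_new_tokens=100)
#     return tokenizer.decode(output_ids[0], skip_special_tokens=True)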
# Create Gradio interface
iface = gr.Interface(
    fn=generate_output,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Model Output"),
    title="Text Processing with Llama Model",
    description="Enter text to process it with the Llama3.2 model.",
)
# Launch the app
iface.launch()
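# Optional: passing share=True to launch() also exposes a temporary public URL.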