# Gradio demo: run user text through the Rafay17/Llama3.2_1b_customModle2 model.
import gradio as gr
import torch
from transformers import AutoModel, AutoTokenizer

# Identifier of the fine-tuned checkpoint on the Hugging Face Hub.
model_name = "Rafay17/Llama3.2_1b_customModle2"

# Fetch the tokenizer first, then the model weights; both are cached locally
# by transformers after the first download.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# Define a function to process input text
# Define a function to process input text
def generate_output(input_text):
    """Run *input_text* through the model and return a displayable summary.

    The checkpoint is loaded with ``AutoModel`` (no generation head), so the
    forward pass yields hidden states rather than generated text.  The last
    hidden state is formatted as a string because the Gradio output component
    is a Textbox, which expects ``str`` — a raw ``torch.Tensor`` would render
    as an unreadable repr.

    Args:
        input_text: Raw text entered by the user in the Gradio UI.

    Returns:
        A string with the shape of the last hidden state followed by the
        tensor's values.
    """
    # truncation=True keeps inputs longer than the model's context window
    # from crashing the forward pass.
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True)

    # Inference only — disable gradient tracking to save memory/compute.
    with torch.no_grad():
        outputs = model(**inputs)

    # Format a compact, human-readable summary for the Textbox output.
    hidden = outputs.last_hidden_state
    return f"last_hidden_state shape: {tuple(hidden.shape)}\n{hidden}"

# Assemble the Gradio UI: one text input, one text output, wired to the
# model-forward function above.
text_in = gr.Textbox(label="Input Text")
text_out = gr.Textbox(label="Model Output")

iface = gr.Interface(
    fn=generate_output,
    inputs=text_in,
    outputs=text_out,
    title="Text Processing with Llama Model",
    description="Enter text to process it with the Llama3.2 model.",
)

# Start the local web server for the demo.
iface.launch()