# Source: Hugging Face Space "app.py" by prasenjeet099
# (commit 6e7a20b, 815 bytes) — captured from the Hub file viewer.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the model and tokenizer once at module import time, so the same
# weights are reused for every request instead of being reloaded per call.
model_name = "zltd/zbrain_llm_0.1"  # Hugging Face Hub repo id — TODO confirm it is public/accessible
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Define a function for text generation
def generate_text(prompt, max_length=100):
    """Generate a text continuation of *prompt* with the loaded model.

    Args:
        prompt: Input text to continue.
        max_length: Cap on the TOTAL token count (prompt + continuation)
            handed to ``model.generate`` — note this is not the number of
            newly generated tokens. Defaults to 100.

    Returns:
        The decoded output string; it includes the original prompt, since
        ``generate`` returns the full sequence.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    sequences = model.generate(**encoded, max_length=max_length)
    # skip_special_tokens drops pad/eos markers from the decoded text.
    return tokenizer.decode(sequences[0], skip_special_tokens=True)
# Create a Gradio interface: one text box in, one text box out, wired to
# generate_text. Only `prompt` is exposed, so max_length stays at its
# default of 100 for every request.
demo = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Text Generation with Custom Model",
    description="Enter a prompt to generate text.",
)

# Launch the web server only when run as a script, not when imported
# (e.g. by the Spaces runtime or tests).
if __name__ == "__main__":
    demo.launch()