# gemma-3-270m / app.py
import os
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# ------------------------------
# Load model
# ------------------------------
# model_id = "gemma_3_270m_model"  # local model folder, if using a fine-tuned copy
model_id = "google/gemma-3-270m"   # Hugging Face Hub repo
hf_token = os.environ.get("HF_TOKEN")  # read from Hugging Face Secrets (Gemma is a gated model)
tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    token=hf_token,  # `use_auth_token` is deprecated in recent transformers; use `token`
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=hf_token,
    trust_remote_code=True,
    device_map="auto",  # requires the `accelerate` package; places weights on GPU if available
)
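# Note: at 270M parameters the model is small enough to run on CPU-only Spaces
# in float32. Passing torch_dtype="auto" above would load the checkpoint's
# native dtype instead (an optional tweak, not required for correctness).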
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
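# Optional sanity check before wiring up the UI (the prompt is an arbitrary
# example; uncomment to run once at startup):
# print(pipe("The quick brown fox", max_new_tokens=20)[0]["generated_text"])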
# ------------------------------
# Gradio interface
# ------------------------------
def generate_text(prompt, max_new_tokens=100):
    """Generate text from the model."""
    # Use max_new_tokens rather than max_length: max_length counts the prompt
    # tokens too, so a long prompt would silently eat into the generation budget.
    output = pipe(prompt, max_new_tokens=int(max_new_tokens))
    return output[0]["generated_text"]
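# If more varied output is desired, sampling parameters can be forwarded to the
# pipeline. A minimal sketch; the parameter values below are illustrative, not tuned:
#
# def generate_text_sampled(prompt, max_new_tokens=100):
#     output = pipe(
#         prompt,
#         max_new_tokens=int(max_new_tokens),
#         do_sample=True,    # sample from the distribution instead of greedy decoding
#         temperature=0.7,   # assumed value; <1 sharpens, >1 flattens the distribution
#         top_p=0.9,         # nucleus sampling: keep the smallest token set covering 90% mass
#     )
#     return output[0]["generated_text"]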
# Create Gradio interface
demo = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Enter your text here..."),
        gr.Slider(label="Max new tokens", minimum=10, maximum=500, value=100, step=1),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Gemma-3-270M Text Generator",
    description="Enter a prompt and the model will generate text.",
)
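# Optional: on a busy Space, Gradio's request queue serializes generation calls
# so concurrent users don't contend for the model.
# demo.queue()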
# Launch the app
if __name__ == "__main__":
    demo.launch()