import streamlit as st
from diffusers import StableDiffusionPipeline
import torch


@st.cache_resource
def load_pipeline():
    """Load the Stable Diffusion pipeline once and cache it across reruns."""
    model_name = "runwayml/stable-diffusion-v1-5"
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Use float16 to halve GPU memory; fall back to float32 on CPU, where
    # half precision is not supported by the pipeline.
    dtype = torch.float16 if device == "cuda" else torch.float32
    # Note: use_auth_token is deprecated in recent diffusers releases; pass
    # token=... only if the checkpoint is gated.
    pipeline = StableDiffusionPipeline.from_pretrained(
        model_name,
        torch_dtype=dtype,
    )
    return pipeline.to(device)


pipeline = load_pipeline()

st.title("🎨 Open Source Text-to-Image Generator")
st.write("Generate images from text prompts using Stable Diffusion.")

# Sidebar settings are defined before the generate button so their values
# are available when the pipeline is called below.
st.sidebar.title("Settings")
st.sidebar.write("Customize your generation:")
guidance_scale = st.sidebar.slider("Guidance Scale", 5.0, 15.0, 7.5)
num_inference_steps = st.sidebar.slider("Inference Steps", 10, 100, 50)

if st.sidebar.button("Clear Cache"):
    st.cache_resource.clear()
    st.success("Cache cleared!")

prompt = st.text_input("Enter your prompt:", placeholder="A futuristic cityscape at sunset")

if st.button("Generate Image"):
    if prompt:
        with st.spinner("Generating image..."):
            try:
                # Pass the sidebar settings through to the pipeline call.
                result = pipeline(
                    prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                )
                image = result.images[0]
                # use_container_width replaces the deprecated use_column_width
                # in recent Streamlit releases.
                st.image(image, caption="Generated Image", use_container_width=True)
            except Exception as e:
                st.error(f"An error occurred: {e}")
    else:
        st.warning("Please enter a prompt to generate an image.")
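
# To run the app locally (assuming this file is saved as app.py and that
# streamlit, diffusers, transformers, and torch are installed):
#
#     streamlit run app.py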