|
import spaces |
|
import gradio as gr |
|
import torch |
|
from PIL import Image |
|
from diffusers import DiffusionPipeline |
|
import random |
|
import os |
|
import json |
|
import io |
|
import uuid |
|
from gradio_client import Client as client_gradio |
|
from supabase import create_client, Client |
|
from datetime import datetime |
|
|
|
|
|
# --- Supabase / credentials configuration ----------------------------------
# Both values come from the environment; create_client() is called at import
# time, so the process fails fast if SUPABASE_URL / SUPABASE_KEY are missing.
url: str = os.getenv('SUPABASE_URL')

key: str = os.getenv('SUPABASE_KEY')

supabase: Client = create_client(url, key)

# NOTE(review): HF_TOKEN is read here but never passed to from_pretrained()
# or load_lora_weights() below — confirm the model/LoRA repos are public,
# otherwise this token is dead configuration.
hf_token = os.getenv("HF_TOKEN")
|
|
|
|
|
# --- Diffusion pipeline ----------------------------------------------------
base_model = "black-forest-labs/FLUX.1-dev"

# fp16 halves weight memory; use_safetensors avoids pickle-based checkpoints.
pipe = DiffusionPipeline.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    use_safetensors=True
)

# Moves the whole pipeline to the GPU at import time — a CUDA device must be
# available when the module loads, not only inside the @spaces.GPU handler.
pipe.to("cuda")
|
|
|
|
|
# Registry of LoRA adapters selectable from the UI. Keys double as the
# diffusers adapter names used by set_adapters() in run_lora().
# `trigger_word` is prepended to the prompt (only Nanda's is used by
# run_lora, and both are currently empty strings).
lora_models = {

    "studio": {
        "repo": "vcollos/Paula2",
        "weights": "Paula P.safetensors",
        "trigger_word": ""
    },

    "Nanda": {
        "repo": "vcollos/Nanda",
        "weights": "lora.safetensors",
        "trigger_word": ""
    }

}

# Eagerly load every adapter once at startup. A failure is logged but does
# not abort the app — the corresponding UI option would then silently fail
# when selected.
for name, details in lora_models.items():

    try:
        pipe.load_lora_weights(details["repo"], weight_name=details["weights"], adapter_name=name)
        print(f"✅ LoRA {name} carregado")
    except Exception as e:
        print(f"❌ Erro ao carregar o LoRA {name}: {e}")

# Largest unsigned 32-bit value; upper bound for generated seeds.
MAX_SEED = 2**32 - 1
|
|
|
def upload_image_to_supabase(image, filename):
    """Upload a PIL image to Supabase Storage and return its public URL.

    Args:
        image: PIL.Image.Image to upload; serialized as PNG in memory.
        filename: target object name inside the "images/" folder of the
            "images" bucket.

    Returns:
        str | None: the public object URL on success, or None when the
        upload fails (best-effort contract — callers treat None as failure).
    """
    img_bytes = io.BytesIO()
    image.save(img_bytes, format="PNG")
    img_bytes.seek(0)

    # BUG FIX: the path previously hard-coded a placeholder instead of using
    # `filename`, so every upload collided on the same storage key and the
    # returned URL never pointed at the uploaded object.
    storage_path = f"images/{filename}"

    try:
        supabase.storage.from_("images").upload(
            storage_path, img_bytes.getvalue(), {"content-type": "image/png"}
        )

        # Supabase public URL layout:
        #   <project-url>/storage/v1/object/public/<bucket>/<path-in-bucket>
        # base_url already names the bucket, so append the in-bucket path.
        base_url = f"{url}/storage/v1/object/public/images"
        return f"{base_url}/{storage_path}"
    except Exception as e:
        print(f"❌ Erro no upload da imagem: {e}")
        return None
|
|
|
@spaces.GPU(duration=80)
def run_lora(prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_option, lora_scale_1, lora_scale_2, progress=gr.Progress(track_tqdm=True)):
    """Generate one image with the selected LoRA adapter(s) and persist it.

    Returns (image, seed): the generated PIL image and the seed actually
    used (randomized when `randomize_seed` is set). The image is always
    returned even when the Supabase upload or metadata insert fails.
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device="cuda").manual_seed(seed)

    # Truncate to the first 77 whitespace-separated WORDS.
    # NOTE(review): the UI placeholder says "77 caracteres" and CLIP's limit
    # is 77 *tokens* — this word-based cut is only an approximation; confirm
    # the intended limit.
    prompt_tokens = prompt.split()[:77]
    prompt = " ".join(prompt_tokens)

    # Map the radio choice onto diffusers adapter names and their weights.
    # NOTE(review): the "Nenhum" option falls through to empty lists here,
    # so set_adapters([], []) is called — confirm diffusers disables all
    # adapters in that case rather than raising.
    selected_loras = []
    adapter_weights = []

    if lora_option == "studio":
        selected_loras.append("studio")
        adapter_weights.append(lora_scale_1)
    elif lora_option == "Nanda":
        selected_loras.append("Nanda")
        adapter_weights.append(lora_scale_2)
    elif lora_option == "Ambos":
        selected_loras = ["studio", "Nanda"]
        adapter_weights = [lora_scale_1, lora_scale_2]

    pipe.set_adapters(selected_loras, adapter_weights)

    # Only Nanda defines a trigger word (currently the empty string, which
    # just prepends a space).
    if "Nanda" in selected_loras:
        prompt = f"{lora_models['Nanda']['trigger_word']} {prompt}"

    with torch.autocast("cuda"):
        image = pipe(
            prompt=prompt,
            num_inference_steps=steps,
            guidance_scale=cfg_scale,
            width=width,
            height=height,
            generator=generator
        ).images[0]

    # Seed + UTC timestamp keeps names unique per generation.
    # NOTE(review): datetime.utcnow() is deprecated in Python 3.12+;
    # datetime.now(timezone.utc) is the modern equivalent.
    filename = f"image_{seed}_{datetime.utcnow().strftime('%Y%m%d%H%M%S')}.png"

    # Upload first; on any failure return the image early and skip the
    # metadata insert (there is no URL to record).
    try:
        image_url = upload_image_to_supabase(image, filename)
        if image_url:
            print(f"✅ Imagem salva no Supabase: {image_url}")
        else:
            print("❌ Erro: URL da imagem retornou None")
            return image, seed
    except Exception as e:
        print(f"❌ Erro ao fazer upload da imagem: {e}")
        return image, seed

    # Best-effort metadata insert — failures are logged only; the generated
    # image is still returned to the UI.
    try:
        response = supabase.table("images").insert({
            "prompt": prompt,
            "cfg_scale": cfg_scale,
            "steps": steps,
            "seed": seed,
            "lora_option": lora_option,
            "lora_scale_1": lora_scale_1,
            "lora_scale_2": lora_scale_2,
            "image_url": image_url,
            "created_at": datetime.utcnow().isoformat()
        }).execute()

        if response.data:
            print("✅ Metadados salvos no Supabase")
        else:
            print("❌ Erro: Resposta vazia do Supabase")

    except Exception as e:
        print(f"❌ Erro ao salvar metadados no Supabase: {e}")

    return image, seed
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
# Theme name is taken from the environment (None falls back to the default).
gr_theme = os.getenv("THEME")
with gr.Blocks(theme=gr_theme) as app:
    gr.Markdown("# studio Image Generator")

    with gr.Row():
        # Left column: prompt + all generation controls. Component creation
        # order defines the on-screen layout.
        with gr.Column(scale=2):
            prompt = gr.TextArea(label="Prompt", placeholder="Digite um prompt (máx 77 caracteres)", lines=3)
            generate_button = gr.Button("Gerar")
            cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=32)
            width = gr.Slider(label="Width", minimum=256, maximum=1024, step=64, value=768)
            height = gr.Slider(label="Height", minimum=256, maximum=1024, step=64, value=1024)
            randomize_seed = gr.Checkbox(False, label="Randomize seed")
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=556215326)
            lora_option = gr.Radio(["Nenhum", "studio", "Nanda", "Ambos"], label="Escolha o LoRA", value="Ambos")
            lora_scale_1 = gr.Slider(label="LoRA Scale (studio)", minimum=0, maximum=1, step=0.01, value=0.1)
            lora_scale_2 = gr.Slider(label="LoRA Scale (Nanda)", minimum=0, maximum=1, step=0.01, value=1)

        # Right column: generated image output.
        with gr.Column(scale=2):
            result = gr.Image(label="Generated Image")

    # run_lora returns (image, seed); writing seed back into the slider lets
    # the user see which seed was actually used when randomization is on.
    generate_button.click(
        run_lora,
        inputs=[prompt, cfg_scale, steps, randomize_seed, seed, width, height, lora_option, lora_scale_1, lora_scale_2],
        outputs=[result, seed],
    )

# Queueing is required for @spaces.GPU handlers; share=True exposes a public
# gradio.live link.
app.queue()
app.launch(share=True)