Update app.py
app.py
CHANGED
@@ -1,33 +1,42 @@
-import
-import time
-import random
-import logging
-import traceback
-import numpy as np
-import torch
+import spaces
 import gradio as gr
+import torch
 from PIL import Image
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
+import random
+import os
+import json
 import io
+import time
 from datetime import datetime
-from huggingface_hub import
+from huggingface_hub import HfFileSystem, ModelCard
 
-#
+# Logging configuration
+import logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
-#
-
-
-
-
+# Supabase (optional)
+try:
+    from supabase import create_client, Client
+    url: str = os.getenv('SUPABASE_URL')
+    key: str = os.getenv('SUPABASE_KEY')
+    supabase: Client = create_client(url, key) if url and key else None
+    supabase_enabled = True if supabase else False
+    logger.info("Supabase inicializado" if supabase_enabled else "Supabase não configurado")
+except Exception as e:
+    logger.warning(f"Erro ao inicializar Supabase: {e}")
+    supabase_enabled = False
+    supabase = None
 
-#
-
-MAX_SEED = 2**32-1
+# Get the Hugging Face token
+hf_token = os.getenv("HF_TOKEN")
 
-#
+# Maximum seed
+MAX_SEED = 2**32 - 1
+
+# LoRA model list
 loras = [
-    # Super-Realism
     {
         "image": "https://huggingface.co/vcollos/VitorCollos/resolve/main/images/IMG_0047.WEBP",
         "title": "Vitor",
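The hunk above makes Supabase strictly optional: the client only exists when both `SUPABASE_URL` and `SUPABASE_KEY` are set, and any failure degrades to a logged warning with storage disabled. A minimal sketch for checking that wiring outside the app; the placeholder values and the bucket listing are assumptions, not part of the diff:

```python
# Hypothetical smoke test for the optional Supabase wiring; requires supabase-py.
import os

os.environ.setdefault("SUPABASE_URL", "https://<project>.supabase.co")  # placeholder
os.environ.setdefault("SUPABASE_KEY", "<service-role-key>")             # placeholder

from supabase import create_client

client = create_client(os.environ["SUPABASE_URL"], os.environ["SUPABASE_KEY"])
print(client.storage.list_buckets())  # the app expects a bucket named "images"
```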
@@ -39,7 +48,7 @@ loras = [
         "image": "https://huggingface.co/vcollos/camila/resolve/main/images/1732936378531__000003000_1.jpg",
         "title": "Camila",
         "repo": "vcollos/camila",
-        "weights": "Camila.safetensors",
+        "weights": "Camila.safetensors",
         "trigger_word": "A photo of Camila"
     },
     {
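The `weights` line above is the only change in this hunk; the entry keeps the same shape as every other gallery item. Reading the rest of the diff (required keys accessed directly, optional ones via `.get`), a hypothetical entry looks like this:

```python
# Hypothetical loras[] entry; "weights" and "trigger_word" are the optional keys.
example_lora = {
    "image": "https://huggingface.co/<user>/<repo>/resolve/main/preview.jpg",  # gallery thumbnail
    "title": "Display name",          # caption shown in the gr.Gallery
    "repo": "<user>/<repo>",          # Hub repo id passed to load_lora_weights
    "weights": "file.safetensors",    # optional: explicit weight file inside the repo
    "trigger_word": "A photo of ...", # optional: prepended to the user prompt
}
```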
@@ -86,59 +95,10 @@ loras = [
     }
 ]
 
-#
-
-
-logger.info("Iniciando carregamento dos modelos...")
-import torch
-from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
-
-# Configure device and dtype
-device = "cuda" if torch.cuda.is_available() else "cpu"
-dtype = torch.bfloat16 if device == "cuda" else torch.float32
-
-logger.info(f"Usando dispositivo: {device} com dtype: {dtype}")
-
-# Load the tiny autoencoder for previews
-taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
-
-# Load the full VAE for the final image
-good_vae = AutoencoderKL.from_pretrained(BASE_MODEL, subfolder="vae", torch_dtype=dtype).to(device)
-
-# Load the main pipeline
-pipe = DiffusionPipeline.from_pretrained(
-    BASE_MODEL,
-    torch_dtype=dtype,
-    vae=taef1
-).to(device)
-
-# Create the image-to-image pipeline
-pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
-    BASE_MODEL,
-    vae=good_vae,
-    transformer=pipe.transformer,
-    text_encoder=pipe.text_encoder,
-    tokenizer=pipe.tokenizer,
-    text_encoder_2=pipe.text_encoder_2,
-    tokenizer_2=pipe.tokenizer_2,
-    torch_dtype=dtype
-).to(device)
-
-logger.info("Modelos carregados com sucesso")
-
-    return {
-        "pipe": pipe,
-        "pipe_i2i": pipe_i2i,
-        "good_vae": good_vae,
-        "device": device,
-        "dtype": dtype
-    }
-except Exception as e:
-    logger.error(f"Erro ao carregar modelos: {e}")
-    logger.error(traceback.format_exc())
-    raise e
+# Initialize the model
+base_model = "black-forest-labs/FLUX.1-dev"
+logger.info(f"Inicializando modelo base: {base_model}")
 
-# Class for measuring operation durations
 class TimeMeasure:
     def __init__(self, name=""):
         self.name = name
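The context lines only show fragments of `TimeMeasure`: its `__init__` above and the tail of `__exit__` at the top of the next hunk. A reconstruction of the full context manager consistent with those fragments; the `__enter__` body is an assumption:

```python
import time
import logging

logger = logging.getLogger(__name__)

class TimeMeasure:
    """Context manager that logs how long a with-block took (reconstructed sketch)."""
    def __init__(self, name=""):
        self.name = name

    def __enter__(self):
        self.start = time.time()  # assumed: __exit__ reads self.start
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.duration = time.time() - self.start
        logger.info(f"🕒 {self.name}: {self.duration:.2f} segundos")
```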
@@ -151,156 +111,46 @@ class TimeMeasure:
         self.duration = time.time() - self.start
         logger.info(f"🕒 {self.name}: {self.duration:.2f} segundos")
 
+# Upload an image to Supabase (if configured)
+def upload_image_to_supabase(image, filename):
+    if not supabase_enabled:
+        return None
+
+    img_bytes = io.BytesIO()
+    image.save(img_bytes, format="PNG")
+    img_bytes.seek(0)
+
+    storage_path = f"images/{filename}"
+
+    try:
+        supabase.storage.from_("images").upload(storage_path, img_bytes.getvalue(), {"content-type": "image/png"})
+        base_url = f"{url}/storage/v1/object/public/images"
+        return f"{base_url}/{storage_path}"
+    except Exception as e:
+        logger.error(f"Erro no upload da imagem: {e}")
+        return None
+
 # Function to process model selection in the interface
-def update_selection(evt: gr.SelectData
+def update_selection(evt: gr.SelectData):
     selected_lora = loras[evt.index]
     new_placeholder = f"Digite o prompt para {selected_lora['title']}, de preferência em inglês."
     lora_repo = selected_lora["repo"]
     updated_text = f"### Selecionado: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
-
-    # Adjust dimensions to the model's specs
-    if "aspect" in selected_lora:
-        if selected_lora["aspect"] == "retrato":
-            width = 768
-            height = 1024
-        elif selected_lora["aspect"] == "paisagem":
-            width = 1024
-            height = 768
-        else:
-            width = 1024
-            height = 1024
 
     return (
        gr.update(placeholder=new_placeholder),
        updated_text,
        evt.index,
-       width,
-       height,
    )
 
-#
-def generate_image(prompt, steps, cfg_scale, width, height, selected_index, randomize_seed, seed, lora_scale, input_image=None, image_strength=0.75, progress=gr.Progress()):
-    try:
-        if selected_index is None:
-            raise gr.Error("Por favor, selecione um modelo LoRA primeiro")
-
-        # Load models (only when needed)
-        models = load_models()
-        pipe = models["pipe"]
-        pipe_i2i = models["pipe_i2i"]
-        good_vae = models["good_vae"]
-        device = models["device"]
-
-        # Build the prompt with trigger words
-        selected_lora = loras[selected_index]
-        lora_path = selected_lora["repo"]
-        trigger_word = selected_lora.get("trigger_word", "")
-        qualidade = "<flux.1-dev>"
-
-        if trigger_word:
-            trigger_position = selected_lora.get("trigger_position", "prepend")
-            if trigger_position == "prepend":
-                prompt_full = f"{trigger_word} {prompt} {qualidade}"
-            else:
-                prompt_full = f"{prompt} {trigger_word} {qualidade}"
-        else:
-            prompt_full = f"{prompt} {qualidade}"
-
-        logger.info(f"Prompt completo: {prompt_full}")
-
-        # Randomize the seed if requested
-        if randomize_seed:
-            seed = random.randint(0, MAX_SEED)
-
-        # Set up the generator
-        generator = torch.Generator(device=device).manual_seed(seed)
-        progress(0, desc="Preparando modelo...")
-
-        # Unload previous LoRA weights and clear the cache
-        with TimeMeasure("Descarregando modelos anteriores"):
-            pipe.unload_lora_weights() if hasattr(pipe, 'unload_lora_weights') else None
-            pipe_i2i.unload_lora_weights() if hasattr(pipe_i2i, 'unload_lora_weights') else None
-            torch.cuda.empty_cache()
-
-        # Load the LoRA weights
-        with TimeMeasure(f"Carregando LoRA {selected_lora['title']}"):
-            pipeline_to_use = pipe_i2i if input_image is not None else pipe
-            weight_name = selected_lora.get("weights", None)
-
-            try:
-                pipeline_to_use.load_lora_weights(
-                    lora_path,
-                    weight_name=weight_name,
-                    low_cpu_mem_usage=True
-                )
-                logger.info(f"LoRA carregado: {weight_name}")
-            except Exception as e:
-                logger.error(f"Erro ao carregar LoRA: {e}")
-                raise gr.Error(f"Erro ao carregar LoRA: {str(e)}")
-
-        # Generate the image
-        with TimeMeasure("Gerando imagem"):
-            progress(0.2, desc=f"Gerando imagem com {steps} passos...")
-
-            if input_image is not None:
-                # Image-to-image mode
-                from diffusers.utils import load_image
-                image_input = load_image(input_image)
-
-                result = pipe_i2i(
-                    prompt=prompt_full,
-                    image=image_input,
-                    strength=image_strength,
-                    num_inference_steps=steps,
-                    guidance_scale=cfg_scale,
-                    width=width,
-                    height=height,
-                    generator=generator,
-                    joint_attention_kwargs={"scale": lora_scale},
-                )
-                final_image = result.images[0]
-            else:
-                # Text-to-image mode with progressive preview
-                final_image = None
-
-                # Callback to report progress
-                def callback_fn(i, t, latents):
-                    progress((i + 1) / steps, desc=f"Passo {i+1}/{steps}")
-                    return True
-
-                # Generate image
-                result = pipe(
-                    prompt=prompt_full,
-                    num_inference_steps=steps,
-                    guidance_scale=cfg_scale,
-                    width=width,
-                    height=height,
-                    generator=generator,
-                    joint_attention_kwargs={"scale": lora_scale},
-                    callback=callback_fn,
-                    callback_steps=1
-                )
-                final_image = result.images[0]
-
-        # Clear the cache after generation
-        torch.cuda.empty_cache()
-
-        return final_image, seed
-
-    except Exception as e:
-        logger.error(f"Erro ao gerar imagem: {e}")
-        logger.error(traceback.format_exc())
-        raise gr.Error(f"Erro: {str(e)}")
-
-# Check the custom model
+# Load a custom model
 def add_custom_lora(custom_lora):
     global loras
 
     if not custom_lora:
-        return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None
+        return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None
 
     try:
-        # Parse the model link or ID
         model_id = custom_lora
         if model_id.startswith("https://huggingface.co/"):
             model_id = model_id.replace("https://huggingface.co/", "")
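A usage sketch for the new `upload_image_to_supabase` helper above, assuming storage is enabled and a public bucket named `images` exists. Note the helper also prefixes the object path with `images/`, so the public URL contains that segment twice:

```python
from datetime import datetime
from PIL import Image

img = Image.new("RGB", (64, 64), "white")  # stand-in for a generated image
name = f"image_0_{datetime.utcnow().strftime('%Y%m%d%H%M%S')}.png"

public_url = upload_image_to_supabase(img, name)
print(public_url)  # {SUPABASE_URL}/storage/v1/object/public/images/images/<name>, or None on failure
```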
@@ -312,7 +162,6 @@ def add_custom_lora(custom_lora):
 
         # Check the model card
         try:
-            from huggingface_hub import ModelCard
             model_card = ModelCard.load(model_id)
             base_model = model_card.data.get("base_model")
 
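`ModelCard` is now imported once at module top, so this hunk just drops the local import; the lookup itself is unchanged. In isolation it behaves like this (the printed value depends on the repo's model card):

```python
from huggingface_hub import ModelCard

card = ModelCard.load("vcollos/camila")  # any repo id accepted by add_custom_lora
print(card.data.get("base_model"))       # e.g. "black-forest-labs/FLUX.1-dev" for a FLUX LoRA
```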
@@ -395,7 +244,6 @@ def add_custom_lora(custom_lora):
             gr.Gallery(value=[(item["image"], item["title"]) for item in loras]),
             f"Modelo: {title}",
             existing_item_index,
-            trigger_word if trigger_word else "",
         )
 
     except Exception as e:
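The final hunk below rebuilds generation around a single `run_lora` function behind Hugging Face's ZeroGPU decorator, which leases a GPU per call instead of holding one for the life of the process. The pattern in isolation, with the same 60-second per-call budget the diff uses:

```python
import spaces

@spaces.GPU(duration=60)  # GPU is attached only while this function runs
def run_on_gpu(prompt: str):
    # On a ZeroGPU Space, CUDA is available inside the decorated call.
    ...
```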
@@ -407,189 +255,239 @@ def add_custom_lora(custom_lora):
             gr.update(),
             "",
             None,
-            "",
         )
 
 def remove_custom_lora():
-    return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None
+    return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None
 
-#
-
-
-
-
-
-
-
-
-
-    embed_radius='*radius_lg',
-    shadow_drop='0 1px 2px rgba(0, 0, 0, 0.1)',
-    shadow_drop_lg='0 1px 2px rgba(0, 0, 0, 0.1)',
-    shadow_inset='0 1px 2px rgba(0, 0, 0, 0.1)',
-    shadow_spread='0 1px 2px rgba(0, 0, 0, 0.1)',
-    shadow_spread_dark='0 1px 2px rgba(0, 0, 0, 0.1)',
-    block_radius='*radius_lg',
-    block_shadow='*shadow_drop',
-    container_radius='*radius_lg'
-)
-
-# Custom CSS
-css = """
-#group_with_padding {
-    padding: 20px;
-    background-color: #f5f5f5;
-    border: 1px solid #ccc;
-}
-
-#padded_text {
-    padding: 10px;
-    background-color: #eef;
-    border-radius: 5px;
-    font-size: 16px;
-}
-
-.custom_lora_card {
-    padding: 10px;
-    background-color: #f5f5f5;
-    border-radius: 10px;
-    margin-top: 10px;
-}
-
-.card_internal {
-    display: flex;
-    align-items: center;
-    margin-top: 10px;
-}
-
-.card_internal img {
-    margin-right: 15px;
-    border-radius: 5px;
-}
-"""
-
-# Main interface
-with gr.Blocks(theme=collos, css=css) as interface:
-    # Logo
-    title = gr.HTML(
-        """<img src="https://huggingface.co/spaces/vcollos/Uniodonto/resolve/main/logo/logo_collos_3.png" alt="Logo" style="display: block; margin: 0 auto; padding: 5px 0px 20px 0px; width: 200px;" />""",
-        elem_id="title",
-    )
 
-    #
-
 
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    result = gr.Image(label="Imagem Gerada", type="pil")
-    seed_output = gr.Number(label="Seed", precision=0)
-
-    # Advanced settings
-    with gr.Row():
-        with gr.Accordion("Configurações Avançadas", open=False):
-            with gr.Row():
-                input_image = gr.Image(label="Imagem de Referência (opcional)", type="filepath")
-                image_strength = gr.Slider(
-                    label="Força da Imagem Original",
-                    info="Valores menores preservam mais da imagem original",
-                    minimum=0.1,
-                    maximum=1.0,
-                    step=0.01,
-                    value=0.75
-                )
 
-
-
-            cfg_scale = gr.Slider(label="Escala de Orientação (CFG)", minimum=1, maximum=20, step=0.5, value=3.0)
-            steps = gr.Slider(label="Passos de Inferência", minimum=1, maximum=50, step=1, value=32)
 
-
-
-
 
-
-
-
-            lora_scale = gr.Slider(label="Intensidade do LoRA", minimum=0, maximum=3, step=0.01, value=1.20)
-
-    # Events
-    gallery.select(
-        update_selection,
-        inputs=[width, height],
-        outputs=[prompt, selected_info, selected_index, width, height]
-    )
-
-    custom_lora.change(
-        add_custom_lora,
-        inputs=[custom_lora],
-        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
-    )
-
-    custom_lora_button.click(
-        remove_custom_lora,
-        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
-    )
 
-
-
-        randomize_seed, seed, lora_scale, input_image, image_strength
-    ]
 
-
 
-
-    prompt.submit(generate_image, inputs=generate_inputs, outputs=generate_outputs)
 
-
-
-
-
 
-
-
-
 
-
-
-
-
-
 
-#
-
-
-app.queue(concurrency_count=1).launch(debug=True)
+# Main image generation function
+@spaces.GPU(duration=60)
+def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+    try:
+        if selected_index is None:
+            raise gr.Error("Selecione um modelo para continuar.")
+
+        # Initialize the timestamp
+        start_time = time.time()
+        logger.info(f"Iniciando geração com modelo: {loras[selected_index]['title']}")
+
+        # Model setup
+        with TimeMeasure("Carregando modelo base"):
+            # Use float16 to save memory
+            pipe = DiffusionPipeline.from_pretrained(
+                base_model,
+                torch_dtype=torch.float16,
+                use_safetensors=True
+            )
+            pipe.to("cuda")
+
+        selected_lora = loras[selected_index]
+        lora_path = selected_lora["repo"]
+        lora_weights = selected_lora.get("weights")
+        trigger_word = selected_lora.get("trigger_word", "")
+        qualidade = "<flux.1-dev>"
+
+        # Add the trigger word to the prompt
+        if trigger_word:
+            prompt_full = f"{trigger_word} {prompt} {qualidade}"
+        else:
+            prompt_full = f"{prompt} {qualidade}"
+
+        # Randomize the seed if requested
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+
+        generator = torch.Generator(device="cuda").manual_seed(seed)
+
+        # Load the selected LoRA
+        with TimeMeasure(f"Carregando LoRA {selected_lora['title']}"):
+            try:
+                pipe.load_lora_weights(
+                    lora_path,
+                    weight_name=lora_weights,
+                    adapter_name="lora"
+                )
+                pipe.set_adapters(["lora"], adapter_weights=[lora_scale])
+            except Exception as e:
+                logger.error(f"Erro ao carregar LoRA: {e}")
+                raise gr.Error(f"Erro ao carregar LoRA: {str(e)}")
+
+        # Generate the image
+        with TimeMeasure("Gerando imagem"):
+            result = pipe(
+                prompt=prompt_full,
+                num_inference_steps=steps,
+                guidance_scale=cfg_scale,
+                width=width,
+                height=height,
+                generator=generator
+            )
+            image = result.images[0]
+
+        # Save the image to Supabase if configured
+        if supabase_enabled:
+            try:
+                filename = f"image_{seed}_{datetime.utcnow().strftime('%Y%m%d%H%M%S')}.png"
+                image_url = upload_image_to_supabase(image, filename)
+
+                if image_url:
+                    logger.info(f"Imagem salva no Supabase: {image_url}")
+
+                # Save the metadata
+                supabase.table("images").insert({
+                    "prompt": prompt_full,
+                    "cfg_scale": cfg_scale,
+                    "steps": steps,
+                    "seed": seed,
+                    "lora_scale": lora_scale,
+                    "image_url": image_url,
+                    "created_at": datetime.utcnow().isoformat()
+                }).execute()
+
+                logger.info("Metadados salvos no Supabase")
+            except Exception as e:
+                logger.error(f"Erro ao salvar no Supabase: {e}")
+
+        elapsed_time = time.time() - start_time
+        logger.info(f"Imagem gerada em {elapsed_time:.2f} segundos")
+
+        # Free CUDA memory
+        torch.cuda.empty_cache()
+
+        return image, seed
+
+    except Exception as e:
+        logger.error(f"Erro na geração: {e}")
+        raise gr.Error(str(e))
+
+# Gradio interface
+collos = gr.themes.Soft(
+    primary_hue="gray",
+    secondary_hue="stone",
+    neutral_hue="slate",
+    radius_size=gr.themes.Size(lg="15px", md="8px", sm="6px", xl="16px", xs="4px", xxl="24px", xxs="2px")
+).set(
+    body_background_fill='*primary_100',
+    embed_radius='*radius_lg',
+    shadow_drop='0 1px 2px rgba(0, 0, 0, 0.1)',
+    shadow_drop_lg='0 1px 2px rgba(0, 0, 0, 0.1)',
+    shadow_inset='0 1px 2px rgba(0, 0, 0, 0.1)',
+    shadow_spread='0 1px 2px rgba(0, 0, 0, 0.1)',
+    shadow_spread_dark='0 1px 2px rgba(0, 0, 0, 0.1)',
+    block_radius='*radius_lg',
+    block_shadow='*shadow_drop',
+    container_radius='*radius_lg'
+)
+
+css = """
+.custom_lora_card {
+    padding: 10px;
+    background-color: #f5f5f5;
+    border-radius: 10px;
+    margin-top: 10px;
+}
+
+.card_internal {
+    display: flex;
+    align-items: center;
+    margin-top: 10px;
+}
+
+.card_internal img {
+    margin-right: 15px;
+    border-radius: 5px;
+}
+"""
+
+with gr.Blocks(theme=collos, css=css) as app:
+    # Logo
+    title = gr.HTML(
+        """<img src="https://huggingface.co/spaces/vcollos/Uniodonto/resolve/main/logo/logo_collos_3.png" alt="Logo" style="display: block; margin: 0 auto; padding: 5px 0px 20px 0px; width: 200px;" />""",
+        elem_id="title",
+    )
+
+    selected_index = gr.State(None)
+
+    with gr.Row():
+        with gr.Column(scale=3):
+            prompt = gr.Textbox(label="Prompt", lines=2, placeholder="Selecione um modelo primeiro")
+        with gr.Column(scale=1):
+            generate_button = gr.Button("Gerar Imagem", variant="primary", elem_id="cta")
+
+    with gr.Row():
+        with gr.Column():
+            selected_info = gr.Markdown("")
+            gallery = gr.Gallery(
+                label="Modelos Disponíveis",
+                value=[(item["image"], item["title"]) for item in loras],
+                allow_preview=False,
+                columns=3,
+                show_share_button=False
+            )
+
+            # Custom LoRA section
+            with gr.Group():
+                custom_lora = gr.Textbox(
+                    label="Adicionar Modelo Externo",
+                    placeholder="Nome do modelo ou URL (ex: vcollos/VitorCollos)"
+                )
+                gr.Markdown(
+                    "[Ver modelos FLUX no Hugging Face](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)",
+                    elem_id="lora_list"
+                )
+
+            # Custom model info
+            custom_lora_info = gr.HTML(visible=False)
+            custom_lora_button = gr.Button("Remover Modelo Externo", visible=False)
+
+        with gr.Column():
+            result = gr.Image(label="Imagem Gerada")
+            seed_output = gr.Number(label="Seed", precision=0)
+
+    with gr.Row():
+        with gr.Accordion("Configurações Avançadas", open=False):
+            with gr.Column():
+                with gr.Row():
+                    cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
+                    steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=32)
+
+                with gr.Row():
+                    width = gr.Slider(label="Largura", minimum=256, maximum=1536, step=64, value=1024)
+                    height = gr.Slider(label="Altura", minimum=256, maximum=1536, step=64, value=1024)
+
+                with gr.Row():
+                    randomize_seed = gr.Checkbox(True, label="Seed Aleatória")
+                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+                    lora_scale = gr.Slider(label="Intensidade do LoRA", minimum=0, maximum=3, step=0.01, value=1.20)
+
+    # Events
+    gallery.select(
+        update_selection,
+        inputs=[],
+        outputs=[prompt, selected_info, selected_index]
+    )
+
+    custom_lora.change(
+        add_custom_lora,
+        inputs=[custom_lora],
+        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index]
+    )
+
+    custom_lora_button.click(
+        remove_custom_lora,
+        outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index]
+    )
+
+    generate_inputs = [
+        prompt, cfg_scale, steps, selected_index,
+        randomize_seed, seed, width, height, lora_scale
+    ]
+
+    generate_outputs = [result, seed_output]
+
+    generate_button.click(run_lora, inputs=generate_inputs, outputs=generate_outputs)
+    prompt.submit(run_lora, inputs=generate_inputs, outputs=generate_outputs)
+
+# Start the app
+app.queue()
+app.launch()
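Stripped of the Gradio and Supabase plumbing, the adapter flow in `run_lora` reduces to a short standalone script. A sketch assuming a CUDA machine with access to FLUX.1-dev, reusing the Camila entry's values and the UI defaults from the diff:

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

# Same calls run_lora makes: named adapter, then its weight via set_adapters.
pipe.load_lora_weights("vcollos/camila", weight_name="Camila.safetensors", adapter_name="lora")
pipe.set_adapters(["lora"], adapter_weights=[1.20])  # UI default for lora_scale

image = pipe(
    "A photo of Camila <flux.1-dev>",  # trigger word + prompt + quality tag, as run_lora assembles it
    num_inference_steps=32,
    guidance_scale=3.5,
    width=1024,
    height=1024,
    generator=torch.Generator(device="cuda").manual_seed(42),
).images[0]
image.save("camila.png")
```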