import os import gradio as gr import numpy as np import random from huggingface_hub import AsyncInferenceClient from translatepy import Translator import requests import re import asyncio from PIL import Image translator = Translator() HF_TOKEN = os.environ.get("HF_TOKEN", None) basemodel = "black-forest-labs/FLUX.1-dev" MAX_SEED = np.iinfo(np.int32).max CSS = """ /* Ocultar el footer predeterminado */ footer { visibility: hidden; } /* Fondo oscuro y futurista */ body { background-color: #0f0f0f; color: #e0e0e0; font-family: 'Roboto', sans-serif; } /* Encabezados con efecto neón rosa */ h1, h2, h3, h4, h5, h6 { color: #ff1493; text-shadow: 0 0 10px #ff1493, 0 0 20px #ff1493; } /* -- ETIQUETAS (labels) Y TEXTOS DE LOS CONTROLES -- */ /* Forzamos el color de todos los labels, incluidas las etiquetas de sliders y texto explicativo */ .wrap label, .label, .gr-label { color: #ff1493 !important; text-shadow: 0 0 5px #ff1493; font-weight: bold; } /* -- BOTONES, CONTENEDORES, INPUTS Y SLIDERS -- */ /* Contenedores y elementos base */ .gradio-container, .gr-input, .gr-slider { border: 1px solid #00ffff; background-color: #1a1a1a; color: #e0e0e0; border-radius: 5px; } /* Botones con texto rosa y bordes azules */ .gr-button, .gradio-button { border: 1px solid #00ffff; background-color: #1a1a1a; color: #ff1493; text-shadow: 0 0 5px #ff1493; border-radius: 5px; font-weight: bold; transition: all 0.3s ease; } /* Efecto hover para botones */ .gr-button:hover, .gradio-button:hover { background-color: #00ffff; color: #0f0f0f; text-shadow: none; } /* Inputs y sliders con estética futurista */ .gr-textbox, .gr-slider { background-color: #1a1a1a; border: 1px solid #00ffff; color: #e0e0e0; } /* Ajustar tooltip o numeritos del slider (si Gradio los muestra con noUiSlider) */ .noUi-tooltip { background: #00ffff !important; color: #0f0f0f !important; } /* -- ENLACES Y KO-FI -- */ a { color: #ff1493; text-decoration: none; border-bottom: 1px solid #ff1493; transition: color 0.3s; } a:hover 
{ color: #00ffff; } /* Efecto en imágenes de enlaces (Ko-fi) */ .kofi-link img { max-width: 120px; transition: transform 0.3s; } .kofi-link img:hover { transform: scale(1.1); } /* ---------------------- */ /* Estilos para Advanced Options */ /* Encabezado del Accordion (Advanced Options) en rosa */ .advanced-accordion > summary { color: #ff1493 !important; text-shadow: 0 0 10px #ff1493, 0 0 20px #ff1493; font-weight: bold; } /* Opciones dentro de Advanced Options en azul eléctrico */ .advanced-accordion label { color: #00ffff !important; text-shadow: 0 0 5px #00ffff; } """ JS = """function () { gradioURL = window.location.href if (!gradioURL.endsWith('?__theme=dark')) { window.location.replace(gradioURL + '?__theme=dark'); } }""" def enable_lora(lora_add): if not lora_add: return basemodel else: return lora_add async def generate_image( prompt: str, model: str, lora_word: str, width: int = 768, height: int = 1024, scales: float = 3.5, steps: int = 24, seed: int = -1): if seed == -1: seed = random.randint(0, MAX_SEED) seed = int(seed) print(f'prompt:{prompt}') text = str(translator.translate(prompt, 'English')) + "," + lora_word client = AsyncInferenceClient() try: image = await client.text_to_image( prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model, ) except Exception as e: raise gr.Error(f"Error in {e}") return image, seed async def gen( prompt: str, lora_add: str = "", lora_word: str = "", width: int = 768, height: int = 1024, scales: float = 3.5, steps: int = 24, seed: int = -1, progress=gr.Progress(track_tqdm=True) ): model = enable_lora(lora_add) print(model) image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed) return image, seed def update_lora_fields(selected_lora): if selected_lora: try: url = f"https://huggingface.co/{selected_lora}/raw/main/README.md" r = requests.get(url) if r.status_code == 200: readme = r.text match = re.search(r"instance_prompt:\s*(.*)", 
readme) if match: prompt_trigger = match.group(1).strip() else: prompt_trigger = "" else: prompt_trigger = "" except Exception as e: prompt_trigger = "" return selected_lora, prompt_trigger return "", "" lora_list = [ "RedSparkie/celinedept", "RedSparkie/anahinestrosa", "RedSparkie/abby", "RedSparkie/sexyemogirl", "RedSparkie/miare", "RedSparkie/lagatadeschroedinger", "RedSparkie/silviaalonso", "RedSparkie/pruckute", "RedSparkie/esterexposito", "RedSparkie/oridemier", "RedSparkie/alicedamato-rank4-experiment", "RedSparkie/aitanaocana-schnell", "RedSparkie/lolalolita", "RedSparkie/masi", "RedSparkie/andreabotez", "RedSparkie/aitanaocana", "RedSparkie/halleburns", "RedSparkie/llunaclark", "RedSparkie/alicedamato" ] examples = [] for lora in lora_list: sujeto = lora.split("/")[-1] prompt_example = f"Realistic photograph of a woman, {sujeto}" examples.append([prompt_example, lora, "LLLLT"]) with gr.Blocks(css=CSS, js=JS, theme="dark") as demo: gr.HTML("""
Use any LoRA through the Hugging Face Inference API.
If you enjoy, please donate to my Ko‑fi so I can keep contributing.