#!/usr/bin/env python
"""Gradio UI for text-to-image generation with FLUX.1-dev (plus optional LoRA)
through the Hugging Face Inference API, and a simple image-flip utility tab.

Review fixes vs. the original:
* The module-level function ``result`` was shadowed by ``result = gr.Gallery(...)``
  inside the Blocks context, so ``text_button.click(result, ...)`` registered the
  Gallery *component* as the callback and the Run button could never work.
  The component is now ``result_gallery`` and the function is the handler.
* ``result`` now always returns a 2-tuple, matching its two declared outputs.
* Removed duplicated imports / dead commented code and a stray quote that
  invalidated the ``.gradio-container`` CSS rule.
"""

import io
import json
import os
import random
import subprocess
import time
import uuid
from datetime import datetime
from typing import Tuple

import gradio as gr
import numpy as np
import requests
import spaces  # required for HF Spaces runtime hooks - TODO confirm still needed
import torch
from deep_translator import GoogleTranslator
from fastapi import FastAPI
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

from theme import theme

# NOTE(review): the original bound FastAPI() to ``app`` and immediately shadowed
# it with the gr.Blocks object; the instance is never mounted or served. Kept
# under a distinct name for parity — confirm whether it can be removed.
fastapi_app = FastAPI()

API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100  # seconds allowed for one inference HTTP request
NUM_IMAGES_PER_PROMPT = 1


def flip_image(x):
    """Return the input image array mirrored left-to-right."""
    return np.fliplr(x)


def clear():
    """Clear an output component (Gradio interprets None as 'empty')."""
    return None


def result(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5,
           sampler="DPM++ 2M Karras", seed=-1, strength=100,
           width=896, height=1152):
    """Generate one image from ``prompt`` via the HF Inference API.

    Args:
        lora_id: HF model/LoRA repo id; blank falls back to FLUX.1-dev.
        prompt: text prompt (Russian is auto-translated to English).
        is_negative, steps, cfg_scale, sampler, strength: forwarded to the API.
        seed: RNG seed; -1 picks a random one.
        width, height: output resolution, forwarded in ``parameters``.

    Returns:
        (PIL.Image, seed) on success, (None, None) on failure.

    Raises:
        gr.Error: on any non-200 API response (503 means model loading).
    """
    if not prompt:
        return None, None
    if lora_id is None or not lora_id.strip():
        lora_id = "black-forest-labs/FLUX.1-dev"

    key = random.randint(0, 999)  # tag for correlating log lines
    api_url = "https://api-inference.huggingface.co/models/" + lora_id.strip()
    token = os.getenv("HF_READ_TOKEN")
    request_headers = {"Authorization": f"Bearer {token}"}

    # Translate (ru -> en); English input passes through unchanged.
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # Resolve the seed once so the same value is both sent and returned.
    if seed == -1:
        seed = random.randint(1, 1000000000)

    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "strength": strength,
        "num_images_per_prompt": NUM_IMAGES_PER_PROMPT,
        "parameters": {
            "width": width,
            "height": height,
        },
    }

    response = requests.post(api_url, headers=request_headers, json=payload,
                             timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image = Image.open(io.BytesIO(response.content))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, None


examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]

# NOTE(review): the original had a stray '"' before .gradio-container which
# invalidated that rule; removed so the background actually applies.
css = """
#app-container {
    max-width: 930px;
    margin-left: auto;
    margin-right: auto;
}
.gradio-container {background: url('file=abstract.jpg')}
"""

with gr.Blocks(theme=theme, css=css, elem_id="app-container") as app:
    # Header text reconstructed from the garbled original HTML string.
    gr.HTML("<center><h1>🎨 FLUX.1-Dev with LoRA 🇬🇧</h1></center>")

    with gr.Tab("Text to Image"):
        with gr.Column(elem_id="app-container"):
            with gr.Row():
                with gr.Column(elem_id="prompt-container"):
                    with gr.Row():
                        text_prompt = gr.Textbox(
                            label="Image Prompt",
                            placeholder="Enter a prompt here",
                            lines=2,
                            show_copy_button=True,
                            elem_id="prompt-text-input",
                        )
                    with gr.Row():
                        with gr.Accordion("🎨 Lora trigger words", open=False):
                            gr.Markdown("""
- **sdxl-realistic**: szn style
- **Flux-Realism-FineDetailed**: Fine Detailed
- **Fashion-Hut-Modeling-LoRA**: Modeling
- **SD3.5-Large-Turbo-HyperRealistic-LoRA**: hyper realistic
- **Flux-Fine-Detail-LoRA**: Super Detail
- **SD3.5-Turbo-Realism-2.0-LoRA**: Turbo Realism
- **Canopus-LoRA-Flux-UltraRealism-2.0**: Ultra realistic
- **extremely-detailed**: extremely detailed
- **SD3.5-Large-Photorealistic-LoRA**: photorealistic
- **Flux.1-Dev-LoRA-HDR-Realism**: HDR
- **prithivMLmods/Ton618-Epic-Realism-Flux-LoRA**: Epic Realism
- **john-singer-sargent-style**: John Singer Sargent Style
- **alphonse-mucha-style**: Alphonse Mucha Style
- **ultra-realistic-illustration**: ultra realistic illustration
- **eye-catching**: eye-catching
- **john-constable-style**: John Constable Style
- **film-noir**: in the style of FLMNR
- **flux-lora-pro-headshot**: PROHEADSHOT
""")
                    with gr.Row():
                        custom_lora = gr.Dropdown(
                            [" ",
                             "jwu114/lora-sdxl-realistic",
                             "prithivMLmods/Flux-Realism-FineDetailed",
                             "prithivMLmods/Fashion-Hut-Modeling-LoRA",
                             "prithivMLmods/SD3.5-Large-Turbo-HyperRealistic-LoRA",
                             "prithivMLmods/Flux-Fine-Detail-LoRA",
                             "prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA",
                             "hugovntr/flux-schnell-realism",
                             "fofr/sdxl-deep-down",
                             "prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0",
                             "ntc-ai/SDXL-LoRA-slider.extremely-detailed",
                             "prithivMLmods/Canopus-LoRA-Flux-FaceRealism",
                             "prithivMLmods/SD3.5-Large-Photorealistic-LoRA",
                             "prithivMLmods/Flux.1-Dev-LoRA-HDR-Realism",
                             "prithivMLmods/Ton618-Epic-Realism-Flux-LoRA",
                             "KappaNeuro/john-singer-sargent-style",
                             "KappaNeuro/alphonse-mucha-style",
                             "ntc-ai/SDXL-LoRA-slider.ultra-realistic-illustration",
                             "ntc-ai/SDXL-LoRA-slider.eye-catching",
                             "KappaNeuro/john-constable-style",
                             "dvyio/flux-lora-film-noir",
                             "dvyio/flux-lora-pro-headshot"],
                            label="Custom LoRA",
                        )
                    with gr.Row():
                        with gr.Accordion("⚙️ Advanced Settings", open=False,
                                          elem_id="settings-container"):
                            negative_prompt = gr.Textbox(
                                label="Negative Prompt",
                                lines=5,
                                placeholder="What should not be in the image",
                                value=" (visible hand:1.3), (ugly:1.3), (duplicate:1.2), (morbid:1.1), (mutilated:1.1), out of frame, bad face, extra fingers, mutated hands, (poorly drawn hands:1.1), (poorly drawn face:1.3), (mutation:1.3), (deformed:1.3), blurry, (bad anatomy:1.1), (bad proportions:1.2), (extra limbs:1.1), cloned face, (disfigured:1.2), gross proportions, malformed limbs, (missing arms:1.1), (missing legs:1.1), (extra arms:1.2), (extra legs:1.2), fused fingers, too many fingers, (long neck:1.2), sketched by bad-artist, (bad-image-v2-39000:1.3) ",
                            )
                            with gr.Row():
                                width = gr.Slider(label="Image Width", value=896, minimum=64, maximum=1216, step=32)
                                height = gr.Slider(label="Image Height", value=1152, minimum=64, maximum=1216, step=32)
                            steps = gr.Slider(label="Sampling steps", value=50, minimum=1, maximum=100, step=1)
                            cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
                            method = gr.Radio(
                                label="Sampling method",
                                value="DPM++ 2M Karras",
                                choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "DEIS", "LMS", "DPM Adaptive", "DPM++ 2M", "DPM2 Ancestral", "DPM++ S", "DPM++ SDE", "DDPM", "DPM Fast", "dpmpp_2s_ancestral", "Euler", "Euler CFG PP", "Euler a", "Euler Ancestral", "Euler+beta", "Heun", "Heun PP2", "DDIM", "PLMS", "UniPC", "UniPC BH2"],
                            )
                            strength = gr.Slider(label="Prompt Strength", value=100, minimum=0, maximum=100, step=1)
                            # NOTE(review): this slider is displayed but not wired
                            # into the click handler — same as the original.
                            num_images_per_prompt = gr.Slider(label="Images", minimum=1, maximum=5, step=1, value=2, interactive=True)
                            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
                    with gr.Row():
                        with gr.Accordion("🫘Seed", open=False):
                            seed_output = gr.Textbox(label="Seed Used", show_copy_button=True, elem_id="seed-output")

            with gr.Row():
                text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
                # Renamed from ``result`` so it no longer shadows the handler.
                result_gallery = gr.Gallery(label="Result", columns=1, preview=True)
                clr_button = gr.Button("Clear Prompt", variant="primary", elem_id="clear_button")
                clr_button.click(lambda: gr.Textbox(value=""), None, text_prompt)
            with gr.Row():
                image_output = gr.Image(type="pil", label="Image Output", format="png", show_share_button=False, elem_id="gallery")
            with gr.Row():
                clear_btn = gr.Button(value="Clear Image", variant="primary", elem_id="clear_button")
                clear_btn.click(clear, inputs=[], outputs=[image_output])

            gr.Examples(
                examples=examples,
                inputs=[text_prompt],
                outputs=[result_gallery, seed],
            )

        # ``result`` here is the generation function (the shadowing bug is fixed).
        text_button.click(
            result,
            inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height],
            outputs=[image_output, seed_output],
        )

    with gr.Tab("Flip Image"):
        with gr.Row():
            image_input = gr.Image()
            flip_output = gr.Image(format="png")
        with gr.Row():
            image_button = gr.Button("Run", variant='primary')
        image_button.click(flip_image, inputs=image_input, outputs=flip_output, concurrency_limit=2)

app.queue(default_concurrency_limit=200, max_size=200)  # set up the request queue

if __name__ == "__main__":
    app.launch(show_api=False)