import os
import gradio as gr
import json
import logging
import torch
from PIL import Image
import spaces
from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
import copy
import random
import time
import subprocess

# --- Install huggingface_hub[cli] ---
subprocess.run("pip install huggingface_hub[cli]", shell=True, check=True)

# --- Authenticate to Hugging Face ---
# Try to get the token from the environment; if not found, prompt for it manually.
from huggingface_hub import login
hf_token = os.environ.get("HF_TOKEN")
if not hf_token:
    hf_token = input("Enter your Hugging Face token: ").strip()
    # Keep the token available as an environment variable for the rest of the session.
    os.environ["HF_TOKEN"] = hf_token
if hf_token:
login(hf_token)
print("Successfully authenticated to Hugging Face.")
else:
print("No token provided. Some features may not work without authentication.")
# Load LoRAs from JSON file
with open('loras.json', 'r') as f:
loras = json.load(f)
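# Expected shape of each loras.json entry (assumed schema, inferred from the keys
# accessed throughout this file):
#   {"title": ..., "repo": "user/repo", "weights": "<file>.safetensors",
#    "trigger_word": ..., "image": <preview URL>, "aspect": "portrait"|"landscape"}
# Only "title" and "repo" are required; the remaining keys are optional.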
# Build a human-readable option string per LoRA ("<index>: <title>").
lora_options = [f"{idx}: {lora['title']}" for idx, lora in enumerate(loras)]
# A CheckboxGroup of those options. Note: this component is created outside the
# Blocks context below, so it is never rendered; the per-LoRA checkboxes built
# in the UI are what actually drive selection.
selected_lora_indices = gr.CheckboxGroup(choices=lora_options, label="Select LoRAs to load", value=[])
# Initialize the base model
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
base_model = "black-forest-labs/FLUX.1-dev"
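# taef1 is a tiny, fast autoencoder used for streaming previews; the full-quality
# VAE ("good_vae") is used for the final decode and for image-to-image.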
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
vae=good_vae,
transformer=pipe.transformer,
text_encoder=pipe.text_encoder,
tokenizer=pipe.tokenizer,
text_encoder_2=pipe.text_encoder_2,
tokenizer_2=pipe.tokenizer_2,
torch_dtype=dtype
)
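# The image-to-image pipeline shares the transformer and both text encoders with
# the base pipeline above, so the model weights are only loaded once.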
# Stub out the safety-checker interface. Note that the diffusers FLUX pipelines
# do not ship a built-in safety checker, so this assignment is effectively a no-op.
pipe.safety_checker = lambda images, clip_input, **kwargs: (images, False)
pipe_i2i.safety_checker = lambda images, clip_input, **kwargs: (images, False)
MAX_SEED = 2**32-1
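# Bind the streaming helper to the pipeline instance via the descriptor protocol,
# so it can be called like a method and yield intermediate images for live preview.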
pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
class calculateDuration:
def __init__(self, activity_name=""):
self.activity_name = activity_name
def __enter__(self):
self.start_time = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.end_time = time.time()
self.elapsed_time = self.end_time - self.start_time
if self.activity_name:
print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
else:
print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
def parse_selected_indices(selected_options):
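    # e.g. ["0: Anime Style", "2: Pixel Art"] -> [0, 2]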
indices = []
for option in selected_options:
try:
index = int(option.split(":")[0])
indices.append(index)
except Exception:
continue
return indices
def update_selection(evt: gr.SelectData, width, height):
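    # Select-event handler (expects gr.SelectData): updates the prompt
    # placeholder and picks default dimensions from the LoRA's "aspect" field.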
selected_lora = loras[evt.index]
new_placeholder = f"Type a prompt for {selected_lora['title']}"
lora_repo = selected_lora["repo"]
updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
if "aspect" in selected_lora:
if selected_lora["aspect"] == "portrait":
width = 768
height = 1024
elif selected_lora["aspect"] == "landscape":
width = 1024
height = 768
else:
width = 1024
height = 1024
return (
gr.update(placeholder=new_placeholder),
updated_text,
evt.index,
width,
height,
)
@spaces.GPU(duration=70)
def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress):
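    # Streams intermediate frames from the live-preview helper; each yielded
    # image updates the result component. The ZeroGPU slot lasts up to 70 s.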
pipe.to("cuda")
generator = torch.Generator(device="cuda").manual_seed(seed)
with calculateDuration("Generating image"):
# Generate image
for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
prompt=prompt_mash,
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
joint_attention_kwargs={"scale": lora_scale},
output_type="pil",
good_vae=good_vae,
):
yield img
def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
generator = torch.Generator(device="cuda").manual_seed(seed)
pipe_i2i.to("cuda")
image_input = load_image(image_input_path)
final_image = pipe_i2i(
prompt=prompt_mash,
image=image_input,
strength=image_strength,
num_inference_steps=steps,
guidance_scale=cfg_scale,
width=width,
height=height,
generator=generator,
joint_attention_kwargs={"scale": lora_scale},
output_type="pil",
).images[0]
return final_image
@spaces.GPU(duration=70)
def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices_json, selected_weights_json, randomize_seed, seed, width, height, global_lora_scale, progress=gr.Progress(track_tqdm=True)):
    # Parse the JSON strings produced by combine_selections / combine_weights,
    # e.g. '[0, 2]' and '{"0": 0.95, "2": 1.2}'. (json is already imported above.)
    selected_indices = json.loads(selected_indices_json)
    selected_weights = json.loads(selected_weights_json) if selected_weights_json else {}
if not selected_indices:
raise gr.Error("You must select at least one LoRA before proceeding.")
# Combine trigger words from all selected LoRAs
prompt_mash = prompt
for idx in selected_indices:
selected_lora = loras[idx]
if "trigger_word" in selected_lora and selected_lora["trigger_word"]:
prompt_mash = f"{selected_lora['trigger_word']} {prompt_mash}"
with calculateDuration("Unloading LoRA"):
pipe.unload_lora_weights()
pipe_i2i.unload_lora_weights()
with calculateDuration("Loading LoRA weights"):
pipe_to_use = pipe_i2i if image_input is not None else pipe
for idx in selected_indices:
selected_lora = loras[idx]
weight_name = selected_lora.get("weights", None)
# Get the individual weight for this LoRA from the selected_weights mapping.
# If not found, default to 0.95.
lora_weight = selected_weights.get(str(idx), 0.95)
pipe_to_use.load_lora_weights(
selected_lora["repo"],
weight_name=weight_name,
low_cpu_mem_usage=True,
lora_weight=lora_weight # This parameter should be supported by your load function.
)
with calculateDuration("Randomizing seed"):
if randomize_seed:
seed = random.randint(0, 2**32-1)
if image_input is not None:
final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, global_lora_scale, seed)
yield final_image, seed, gr.update(visible=False)
else:
image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, global_lora_scale, progress)
final_image = None
step_counter = 0
for image in image_generator:
step_counter += 1
final_image = image
progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
yield image, seed, gr.update(value=progress_bar, visible=True)
yield final_image, seed, gr.update(value=progress_bar, visible=False)
def get_huggingface_safetensors(link):
    # Resolve a "user/repo" id to (title, repo, weights filename, trigger word, image URL).
    split_link = link.split("/")
    if len(split_link) != 2:
        raise Exception("Invalid Hugging Face repository! Expected a 'user/repo' id.")
    model_card = ModelCard.load(link)
    base_model = model_card.data.get("base_model")
    print(base_model)
    if base_model not in ("black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"):
        raise Exception("Not a FLUX LoRA!")
    image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
    trigger_word = model_card.data.get("instance_prompt", "")
    image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
    fs = HfFileSystem()
    safetensors_name = None
    try:
        list_of_files = fs.ls(link, detail=False)
        for file in list_of_files:
            if file.endswith(".safetensors"):
                safetensors_name = file.split("/")[-1]
            if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
                image_elements = file.split("/")
                image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
    except Exception as e:
        print(e)
        gr.Warning("You didn't include a link to a valid Hugging Face repository with a *.safetensors LoRA")
        raise Exception("You didn't include a link to a valid Hugging Face repository with a *.safetensors LoRA")
    if safetensors_name is None:
        raise Exception("No *.safetensors file found in the repository!")
    return split_link[1], link, safetensors_name, trigger_word, image_url
def check_custom_model(link):
    if link.startswith("https://"):
        if link.startswith(("https://huggingface.co", "https://www.huggingface.co")):
            link_split = link.split("huggingface.co/")
            return get_huggingface_safetensors(link_split[1])
        raise Exception("Only Hugging Face repository links are supported!")
    return get_huggingface_safetensors(link)
def add_custom_lora(custom_lora):
global loras
if(custom_lora):
try:
title, repo, path, trigger_word, image = check_custom_model(custom_lora)
print(f"Loaded custom LoRA: {repo}")
card = f'''
<div class="custom_lora_card">
<span>Loaded custom LoRA:</span>
<div class="card_internal">
<img src="{image}" />
<div>
<h3>{title}</h3>
                        <small>{"Using: <code><b>"+trigger_word+"</b></code> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
</div>
</div>
</div>
'''
existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
            if existing_item_index is None:  # index 0 is falsy, so compare to None explicitly
new_item = {
"image": image,
"title": title,
"repo": repo,
"weights": path,
"trigger_word": trigger_word
}
print(new_item)
existing_item_index = len(loras)
loras.append(new_item)
return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
except Exception as e:
gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-FLUX LoRA")
return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-FLUX LoRA"), gr.update(visible=True), gr.update(), "", None, ""
else:
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
def remove_custom_lora():
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
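# Note: add_custom_lora / remove_custom_lora are not wired to any event in the
# Blocks UI below; hook them to a custom-LoRA textbox and button to use them.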
run_lora.zerogpu = True
css = '''
#gen_btn { height: 100%; }
#gen_column { align-self: stretch; }
#title { text-align: center; }
#title h1 { font-size: 3em; display: inline-flex; align-items: center; }
#title img { width: 100px; margin-right: 0.5em; }
#lora_list { background: var(--block-background-fill); padding: 0 1em .3em; font-size: 90%; }
.card_internal { display: flex; height: 100px; margin-top: .5em; }
.card_internal img { margin-right: 1em; }
.styler { --form-gap-width: 0px !important; }
#progress { height: 30px; }
.progress-container { width: 100%; height: 30px; background-color: #f0f0f0; border-radius: 15px; overflow: hidden; margin-bottom: 20px; }
.progress-bar { height: 100%; background-color: #4f46e5; width: calc(var(--current) / var(--total) * 100%); transition: width 0.5s ease-in-out; }
'''
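# The .progress-bar width above is driven by the --current/--total CSS variables
# set inline in the HTML snippet that run_lora yields on each step.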
font = [gr.themes.GoogleFont("Source Sans Pro"), "Arial", "sans-serif"]
with gr.Blocks(theme=gr.themes.Soft(font=font), css=css, delete_cache=(60, 60)) as app:
title = gr.HTML(
"""<h1><img src="https://huggingface.co/spaces/kayte0342/test/resolve/main/DA4BE61E-A0BD-4254-A1B6-AD3C05D18A9C%20(1).png?download=true" alt="LoRA"> FLUX LoRA Kayte's Space</h1>""",
elem_id="title",
)
# Hidden textboxes to store the JSON outputs:
selected_indices_hidden = gr.Textbox(value="[]", visible=False)
selected_weights_hidden = gr.Textbox(value="{}", visible=False)
with gr.Row():
with gr.Column(scale=3):
prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
with gr.Column(scale=1, elem_id="gen_column"):
generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
with gr.Row():
with gr.Column():
selected_info = gr.Markdown("")
            # Build a custom layout for LoRA selection: one row per LoRA with a
            # thumbnail, a "Select" checkbox, and a per-LoRA weight slider.
            lora_selection_container = gr.Column()  # unused placeholder; the rows below land in the parent column
            # Collect the checkbox and slider components so their values can be
            # gathered when Generate is clicked.
lora_checkbox_list = []
lora_slider_list = []
for idx, lora in enumerate(loras):
with gr.Row():
                    gr.Image(value=lora.get("image"), label=lora["title"], height=100)  # preview thumbnail, if provided in loras.json
checkbox = gr.Checkbox(label="Select", value=False, elem_id=f"lora_checkbox_{idx}")
slider = gr.Slider(label="Weight", minimum=0, maximum=3, step=0.01, value=0.95, elem_id=f"lora_weight_{idx}")
lora_checkbox_list.append(checkbox)
lora_slider_list.append(slider)
gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
with gr.Column():
progress_bar = gr.Markdown(elem_id="progress", visible=False)
result = gr.Image(label="Generated Image")
with gr.Row():
with gr.Accordion("Advanced Settings", open=False):
with gr.Row():
input_image = gr.Image(label="Input image", type="filepath")
image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
with gr.Column():
with gr.Row():
cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
with gr.Row():
width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
with gr.Row():
randomize_seed = gr.Checkbox(True, label="Randomize seed")
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
lora_scale = gr.Slider(label="Global LoRA Scale", minimum=0, maximum=3, step=0.01, value=0.95)
# Function to combine checkbox selections into a JSON list of indices.
def combine_selections(*checkbox_values):
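        # e.g. (True, False, True) -> "[0, 2]"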
selected_indices = [i for i, v in enumerate(checkbox_values) if v]
return json.dumps(selected_indices)
# Function to combine all slider values into a JSON dictionary mapping index to weight.
def combine_weights(*slider_values):
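        # e.g. (0.95, 1.2) -> '{"0": 0.95, "1": 1.2}'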
weights = {str(i): v for i, v in enumerate(slider_values)}
return json.dumps(weights)
# Chain the updates when the Generate button is clicked:
# First, update the checkbox hidden state, then update the slider hidden state, then call run_lora.
generate_button.click(
combine_selections,
inputs=lora_checkbox_list,
outputs=selected_indices_hidden
).then(
combine_weights,
inputs=lora_slider_list,
outputs=selected_weights_hidden
).then(
run_lora,
inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices_hidden, selected_weights_hidden, randomize_seed, seed, width, height, lora_scale],
outputs=[result, seed, progress_bar]
)
# Update the selected_info display when the selected_indices_hidden changes.
def update_info(selected_json):
selected_indices = json.loads(selected_json)
if selected_indices:
info = "Selected LoRAs: " + ", ".join([loras[i]["title"] for i in selected_indices])
else:
info = "No LoRAs selected."
return info
selected_indices_hidden.change(
update_info,
inputs=selected_indices_hidden,
outputs=selected_info
)
app.queue()
app.launch()