Spaces: Running on Zero

Revision update 0.5.21

- lora_gallery (preview)
- new LUTs
- more hexagon text options
- faster loading
- Gradio 5.21.0
- Fill model
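For context on the new "Fill model" option: the FLUX.1 Fill checkpoint is a mask-based inpainting model, and the sketch below shows how it is typically driven through diffusers' FluxFillPipeline. This is an illustrative, standalone example rather than code from app.py; the file names and prompt are placeholders, while the high guidance scale, 50 steps, and 512-token sequence length mirror values that appear later in this diff.

```python
# Illustrative sketch only (not the Space's code): mask-based fill with
# the FLUX.1 Fill model via diffusers' FluxFillPipeline.
import torch
from diffusers import FluxFillPipeline
from PIL import Image

pipe = FluxFillPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # keep VRAM usage low

image = Image.open("input.png").convert("RGB")  # placeholder source image
mask = Image.open("mask.png").convert("L")      # white regions get repainted

result = pipe(
    prompt="hexagon tile texture, top-down board game map",
    image=image,
    mask_image=mask,
    height=image.height,
    width=image.width,
    guidance_scale=30,        # Fill models expect a high guidance scale
    num_inference_steps=50,
    max_sequence_length=512,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
result.save("filled.png")
```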
- LUT/Colorful.cube +0 -0
- LUT/Contrast.cube +0 -0
- LUT/PureWhites.cube +0 -0
- LUT/Saturation.cube +0 -0
- README.md +1 -1
- app.py +367 -200
- images/prerendered/th/FLUX.1-Fill-dev.png +3 -0
- images/prerendered/th/FLUX.1-dev.png +3 -0
- images/prerendered/th/FLUX.1-schnell.png +3 -0
- images/prerendered/th/Flex.1-alpha.png +3 -0
- style_20250128.css → style_20250314.css +11 -5
- utils/constants.py +219 -15
- utils/hex_grid.py +5 -0
- utils/image_utils.py +64 -25
- utils/lora_details.py +30 -9
- utils/misc.py +18 -1
LUT/Colorful.cube
ADDED
The diff for this file is too large to render. See raw diff.

LUT/Contrast.cube
ADDED
The diff for this file is too large to render. See raw diff.

LUT/PureWhites.cube
ADDED
The diff for this file is too large to render. See raw diff.

LUT/Saturation.cube
ADDED
The diff for this file is too large to render. See raw diff.
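The four .cube files added above are standard 3D LUTs. As a rough sketch of how such a file can be applied to an image with Pillow's Color3DLUT (a hypothetical helper for illustration, not the LUT code in utils/image_utils.py):

```python
# Hypothetical sketch: apply a .cube 3D LUT with Pillow.
# Assumes the common red-fastest data ordering used by .cube files.
from PIL import Image, ImageFilter

def load_cube_lut(path: str) -> ImageFilter.Color3DLUT:
    size, table = None, []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith(("#", "TITLE", "DOMAIN", "LUT_1D")):
                continue
            if line.startswith("LUT_3D_SIZE"):
                size = int(line.split()[-1])
            elif line[0].isdigit() or line[0] == "-":
                table.extend(float(v) for v in line.split()[:3])
    # Color3DLUT expects a flat [r, g, b, r, g, b, ...] table of size**3 entries
    return ImageFilter.Color3DLUT(size, table)

img = Image.open("photo.png").convert("RGB")  # placeholder image
img.filter(load_cube_lut("LUT/Colorful.cube")).save("photo_colorful.png")
```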
README.md
CHANGED
@@ -5,7 +5,7 @@ colorFrom: yellow
 colorTo: purple
 sdk: gradio
 python_version: 3.10.13
-sdk_version: 5.
+sdk_version: 5.21.0
 app_file: app.py
 pinned: true
 short_description: Transform Your Images into Mesmerizing Hexagon Grids
app.py
CHANGED
@@ -11,11 +11,12 @@ from PIL import Image, ImageFilter
|
|
11 |
from easydict import EasyDict as edict
|
12 |
import utils.constants as constants
|
13 |
from haishoku.haishoku import Haishoku
|
|
|
14 |
|
15 |
from tempfile import NamedTemporaryFile
|
16 |
import atexit
|
17 |
import random
|
18 |
-
|
19 |
from transformers import AutoTokenizer, DPTImageProcessor, DPTForDepthEstimation
|
20 |
from trellis.pipelines import TrellisImageTo3DPipeline
|
21 |
from trellis.representations import Gaussian, MeshExtractResult
|
@@ -40,14 +41,14 @@ from utils.misc import (
|
|
40 |
get_filename,
|
41 |
pause,
|
42 |
convert_ratio_to_dimensions,
|
|
|
43 |
get_seed,
|
44 |
-
get_output_name
|
45 |
) #install_cuda_toolkit,install_torch, _get_output, setup_runtime_env)
|
46 |
|
47 |
from utils.image_utils import (
|
48 |
change_color,
|
49 |
open_image,
|
50 |
-
build_prerendered_images_by_quality,
|
51 |
upscale_image,
|
52 |
lerp_imagemath,
|
53 |
shrink_and_paste_on_blank,
|
@@ -56,10 +57,12 @@ from utils.image_utils import (
|
|
56 |
multiply_and_blend_images,
|
57 |
alpha_composite_with_control,
|
58 |
crop_and_resize_image,
|
|
|
59 |
convert_to_rgba_png,
|
60 |
resize_image_with_aspect_ratio,
|
61 |
build_prerendered_images_by_quality,
|
62 |
-
get_image_from_dict
|
|
|
63 |
)
|
64 |
|
65 |
from utils.hex_grid import (
|
@@ -82,16 +85,28 @@ from utils.excluded_colors import (
|
|
82 |
|
83 |
from utils.lora_details import (
|
84 |
upd_prompt_notes,
|
|
|
85 |
split_prompt_precisely,
|
86 |
approximate_token_count,
|
87 |
-
get_trigger_words
|
88 |
)
|
89 |
-
from diffusers import FluxPipeline,FluxImg2ImgPipeline,FluxControlPipeline
|
90 |
|
91 |
PIPELINE_CLASSES = {
|
92 |
"FluxPipeline": FluxPipeline,
|
93 |
"FluxImg2ImgPipeline": FluxImg2ImgPipeline,
|
94 |
-
"FluxControlPipeline": FluxControlPipeline
|
|
|
95 |
}
|
96 |
|
97 |
from utils.version_info import (
|
@@ -103,8 +118,15 @@ from utils.version_info import (
|
|
103 |
#from utils.depth_estimation import (get_depth_map_from_state)
|
104 |
|
105 |
input_image_palette = []
|
106 |
-
current_prerendered_image = gr.State("./images/
|
107 |
user_dir = constants.TMPDIR
|
108 |
|
109 |
# Register the cleanup function
|
110 |
atexit.register(cleanup_temp_files)
|
@@ -209,18 +231,149 @@ def get_model_and_lora(model_textbox):
|
|
209 |
default_model = model_textbox
|
210 |
return default_model, []
|
211 |
|
212 |
-
|
213 |
-
"
|
214 |
-
|
215 |
-
"
|
216 |
-
|
217 |
-
|
218 |
-
|
219 |
-
|
|
220 |
|
221 |
-
# @spaces.GPU(duration=140, progress=gr.Progress(track_tqdm=True))
|
222 |
-
# def generate_image(pipe, generate_params, progress=gr.Progress(track_tqdm=True)):
|
223 |
-
# return pipe(**generate_params)
|
224 |
|
225 |
@spaces.GPU(duration=200, progress=gr.Progress(track_tqdm=True))
|
226 |
def generate_image_lowmem(
|
@@ -229,6 +382,7 @@ def generate_image_lowmem(
|
|
229 |
model_name="black-forest-labs/FLUX.1-dev",
|
230 |
lora_weights=None,
|
231 |
conditioned_image=None,
|
|
|
232 |
image_width=1368,
|
233 |
image_height=848,
|
234 |
guidance_scale=3.5,
|
@@ -240,28 +394,18 @@ def generate_image_lowmem(
|
|
240 |
additional_parameters=None,
|
241 |
progress=gr.Progress(track_tqdm=True)
|
242 |
):
|
243 |
-
#from torch import cuda, bfloat16, float32, Generator, no_grad, backends
|
244 |
-
# Retrieve the pipeline class from the mapping
|
245 |
-
pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
|
246 |
-
if not pipeline_class:
|
247 |
-
raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
|
248 |
-
f"Available options: {list(PIPELINE_CLASSES.keys())}")
|
249 |
-
|
250 |
-
#initialize_cuda()
|
251 |
-
device = "cuda" if torch.cuda.is_available() else "cpu"
|
252 |
-
from src.condition import Condition
|
253 |
-
|
254 |
-
print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
|
255 |
-
#print(f"\n {get_torch_info()}\n")
|
256 |
-
# Disable gradient calculations
|
257 |
with torch.no_grad():
|
258 |
-
#
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
263 |
-
|
264 |
-
|
|
|
|
|
|
|
|
|
265 |
# alternative version that may be more efficient
|
266 |
# pipe.enable_sequential_cpu_offload()
|
267 |
if pipeline_name == "FluxPipeline":
|
@@ -271,127 +415,34 @@ def generate_image_lowmem(
|
|
271 |
else:
|
272 |
pipe.enable_model_cpu_offload()
|
273 |
|
274 |
-
# Access the tokenizer from the pipeline
|
275 |
-
tokenizer = pipe.tokenizer
|
276 |
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
|
289 |
-
else:
|
290 |
-
pipe.attn_implementation="flash_attention_2"
|
291 |
-
print("\nEnabled flash_attention_2.\n")
|
292 |
|
293 |
-
condition_type = "subject"
|
294 |
-
# Load LoRA weights
|
295 |
-
# note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
|
296 |
-
if lora_weights:
|
297 |
-
for lora_weight in lora_weights:
|
298 |
-
lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
|
299 |
-
lora_weight_set = False
|
300 |
-
if lora_configs:
|
301 |
-
for config in lora_configs:
|
302 |
-
# Load LoRA weights with optional weight_name and adapter_name
|
303 |
-
if 'weight_name' in config:
|
304 |
-
weight_name = config.get("weight_name")
|
305 |
-
adapter_name = config.get("adapter_name")
|
306 |
-
lora_collection = config.get("lora_collection")
|
307 |
-
if weight_name and adapter_name and lora_collection and lora_weight_set == False:
|
308 |
-
pipe.load_lora_weights(
|
309 |
-
lora_collection,
|
310 |
-
weight_name=weight_name,
|
311 |
-
adapter_name=adapter_name,
|
312 |
-
token=constants.HF_API_TOKEN
|
313 |
-
)
|
314 |
-
lora_weight_set = True
|
315 |
-
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
|
316 |
-
elif weight_name and adapter_name==None and lora_collection and lora_weight_set == False:
|
317 |
-
pipe.load_lora_weights(
|
318 |
-
lora_collection,
|
319 |
-
weight_name=weight_name,
|
320 |
-
token=constants.HF_API_TOKEN
|
321 |
-
)
|
322 |
-
lora_weight_set = True
|
323 |
-
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
|
324 |
-
elif weight_name and adapter_name and lora_weight_set == False:
|
325 |
-
pipe.load_lora_weights(
|
326 |
-
lora_weight,
|
327 |
-
weight_name=weight_name,
|
328 |
-
adapter_name=adapter_name,
|
329 |
-
token=constants.HF_API_TOKEN
|
330 |
-
)
|
331 |
-
lora_weight_set = True
|
332 |
-
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
|
333 |
-
elif weight_name and adapter_name==None and lora_weight_set == False:
|
334 |
-
pipe.load_lora_weights(
|
335 |
-
lora_weight,
|
336 |
-
weight_name=weight_name,
|
337 |
-
token=constants.HF_API_TOKEN
|
338 |
-
)
|
339 |
-
lora_weight_set = True
|
340 |
-
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
|
341 |
-
elif lora_weight_set == False:
|
342 |
-
pipe.load_lora_weights(
|
343 |
-
lora_weight,
|
344 |
-
token=constants.HF_API_TOKEN
|
345 |
-
)
|
346 |
-
lora_weight_set = True
|
347 |
-
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
|
348 |
-
# Apply 'pipe' configurations if present
|
349 |
-
if 'pipe' in config:
|
350 |
-
pipe_config = config['pipe']
|
351 |
-
for method_name, params in pipe_config.items():
|
352 |
-
method = getattr(pipe, method_name, None)
|
353 |
-
if method:
|
354 |
-
print(f"Applying pipe method: {method_name} with params: {params}")
|
355 |
-
method(**params)
|
356 |
-
else:
|
357 |
-
print(f"Method {method_name} not found in pipe.")
|
358 |
-
if 'condition_type' in config:
|
359 |
-
condition_type = config['condition_type']
|
360 |
-
if condition_type == "coloring":
|
361 |
-
#pipe.enable_coloring()
|
362 |
-
print("\nEnabled coloring.\n")
|
363 |
-
elif condition_type == "deblurring":
|
364 |
-
#pipe.enable_deblurring()
|
365 |
-
print("\nEnabled deblurring.\n")
|
366 |
-
elif condition_type == "fill":
|
367 |
-
#pipe.enable_fill()
|
368 |
-
print("\nEnabled fill.\n")
|
369 |
-
elif condition_type == "depth":
|
370 |
-
#pipe.enable_depth()
|
371 |
-
print("\nEnabled depth.\n")
|
372 |
-
elif condition_type == "canny":
|
373 |
-
#pipe.enable_canny()
|
374 |
-
print("\nEnabled canny.\n")
|
375 |
-
elif condition_type == "subject":
|
376 |
-
#pipe.enable_subject()
|
377 |
-
print("\nEnabled subject.\n")
|
378 |
-
else:
|
379 |
-
print(f"Condition type {condition_type} not implemented.")
|
380 |
-
else:
|
381 |
-
pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
|
382 |
# Set the random seed for reproducibility
|
383 |
generator = torch.Generator(device=device).manual_seed(seed)
|
384 |
-
conditions = []
|
385 |
if conditioned_image is not None:
|
386 |
-
conditioned_image =
|
387 |
-
condition = Condition(condition_type, conditioned_image)
|
388 |
-
conditions.append(condition)
|
389 |
print(f"\nAdded conditioned image.\n {conditioned_image.size}")
|
390 |
# Prepare the parameters for image generation
|
391 |
additional_parameters ={
|
392 |
"strength": strength,
|
393 |
"image": conditioned_image,
|
394 |
}
|
|
|
395 |
else:
|
396 |
print("\nNo conditioned image provided.")
|
397 |
if neg_prompt!=None:
|
@@ -405,11 +456,11 @@ def generate_image_lowmem(
|
|
405 |
prompt, prompt2 = split_prompt_precisely(text)
|
406 |
prompt_parameters = {
|
407 |
"prompt" : prompt,
|
408 |
-
"prompt_2": prompt2
|
409 |
}
|
410 |
else:
|
411 |
prompt_parameters = {
|
412 |
-
"prompt" :text
|
413 |
}
|
414 |
additional_parameters.update(prompt_parameters)
|
415 |
# Combine all parameters
|
@@ -418,24 +469,29 @@ def generate_image_lowmem(
|
|
418 |
"width": image_width,
|
419 |
"guidance_scale": guidance_scale,
|
420 |
"num_inference_steps": num_inference_steps,
|
421 |
-
"generator": generator,
|
|
|
422 |
if additional_parameters:
|
423 |
generate_params.update(additional_parameters)
|
424 |
generate_params = {k: v for k, v in generate_params.items() if v is not None}
|
425 |
print(f"generate_params: {generate_params}")
|
426 |
# Generate the image
|
427 |
-
|
428 |
-
|
429 |
-
|
430 |
-
|
431 |
-
|
432 |
-
|
433 |
-
|
434 |
-
|
|
|
|
|
|
|
|
|
435 |
torch.cuda.empty_cache()
|
436 |
torch.cuda.ipc_collect()
|
437 |
print(torch.cuda.memory_summary(device=None, abbreviated=False))
|
438 |
-
|
439 |
return image
|
440 |
|
441 |
def generate_ai_image_local (
|
@@ -445,6 +501,7 @@ def generate_ai_image_local (
|
|
445 |
model="black-forest-labs/FLUX.1-dev",
|
446 |
lora_weights=None,
|
447 |
conditioned_image=None,
|
|
|
448 |
height=512,
|
449 |
width=912,
|
450 |
num_inference_steps=30,
|
@@ -453,7 +510,7 @@ def generate_ai_image_local (
|
|
453 |
pipeline_name="FluxPipeline",
|
454 |
strength=0.75,
|
455 |
progress=gr.Progress(track_tqdm=True)
|
456 |
-
):
|
457 |
print(f"Generating image with lowmem")
|
458 |
try:
|
459 |
if map_option != "Prompt":
|
@@ -496,12 +553,19 @@ def generate_ai_image_local (
|
|
496 |
print(f"Conditioned Image: {conditioned_image}")
|
497 |
print(f"Conditioned Image Strength: {strength}")
|
498 |
print(f"pipeline: {pipeline_name}")
|
|
|
|
499 |
image = generate_image_lowmem(
|
500 |
text=prompt,
|
501 |
model_name=model,
|
502 |
neg_prompt=negative_prompt,
|
503 |
lora_weights=lora_weights,
|
504 |
conditioned_image=conditioned_image,
|
|
|
505 |
image_width=width,
|
506 |
image_height=height,
|
507 |
guidance_scale=guidance_scale,
|
@@ -509,7 +573,8 @@ def generate_ai_image_local (
|
|
509 |
seed=seed,
|
510 |
pipeline_name=pipeline_name,
|
511 |
strength=strength,
|
512 |
-
additional_parameters=additional_parameters
|
|
|
513 |
)
|
514 |
with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
|
515 |
image.save(tmp.name, format="PNG")
|
@@ -518,11 +583,11 @@ def generate_ai_image_local (
|
|
518 |
return tmp.name
|
519 |
except Exception as e:
|
520 |
print(f"Error generating AI image: {e}")
|
521 |
-
|
522 |
return None
|
523 |
|
524 |
|
525 |
-
def generate_input_image_click(image_input, map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, randomize_seed=True, seed=None, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=
|
526 |
seed = get_seed(randomize_seed, seed)
|
527 |
|
528 |
# Get the model and LoRA weights
|
@@ -539,18 +604,28 @@ def generate_input_image_click(image_input, map_option, prompt_textbox_value, ne
|
|
539 |
print(f"Conditioned Image: {conditioned_image.size}.. converted to RGB\n")
|
540 |
# use image_input as the conditioned_image if it is not None
|
541 |
elif image_input is not None:
|
542 |
-
|
543 |
-
|
544 |
-
|
|
|
545 |
|
546 |
# Convert image_format from a string split by ":" into two numbers divided
|
547 |
width_ratio, height_ratio = map(int, image_format.split(":"))
|
548 |
aspect_ratio = width_ratio / height_ratio
|
549 |
|
550 |
-
width, height = convert_ratio_to_dimensions(aspect_ratio,
|
551 |
pipeline = "FluxPipeline"
|
552 |
if conditioned_image is not None:
|
553 |
pipeline = "FluxImg2ImgPipeline"
|
554 |
# Generate the AI image and get the image path
|
555 |
image_path = generate_ai_image_local(
|
556 |
map_option,
|
@@ -559,6 +634,7 @@ def generate_input_image_click(image_input, map_option, prompt_textbox_value, ne
|
|
559 |
model,
|
560 |
lora_weights,
|
561 |
conditioned_image,
|
|
|
562 |
strength=strength,
|
563 |
height=height,
|
564 |
width=width,
|
@@ -596,6 +672,23 @@ def update_prompt_visibility(map_option):
|
|
596 |
def update_prompt_notes(model_textbox_value):
|
597 |
return upd_prompt_notes(model_textbox_value)
|
598 |
|
599 |
def on_prerendered_gallery_selection(event_data: gr.SelectData):
|
600 |
global current_prerendered_image
|
601 |
selected_index = event_data.index
|
@@ -632,9 +725,12 @@ def update_sketch_dimensions(input_image, sketch_image):
|
|
632 |
sk_img_path, _ = get_image_from_dict(sketch_image)
|
633 |
sk_img = open_image(sk_img_path)
|
634 |
# Resize sketch image if dimensions don't match input image.
|
635 |
-
if in_img.size != sk_img.size:
|
636 |
sk_img = sk_img.resize(in_img.size, Image.LANCZOS)
|
637 |
-
|
|
|
|
|
|
|
638 |
|
639 |
def composite_with_control_sync(input_image, sketch_image, slider_value):
|
640 |
# Load the images using open_image() if they are provided as file paths.
|
@@ -655,7 +751,36 @@ def replace_input_with_sketch_image(sketch_image):
|
|
655 |
sketch, is_dict = get_image_from_dict(sketch_image)
|
656 |
return sketch
|
657 |
####################################### DEPTH ESTIMATION #######################################
|
658 |
-
|
659 |
|
660 |
def preprocess_image(image: Image.Image) -> Image.Image:
|
661 |
"""
|
@@ -796,6 +921,7 @@ def generate_3d_asset_part2(depth_img, image_path, output_name, seed, steps, mod
|
|
796 |
depth_img = Image.open(depth_img).convert("RGBA")
|
797 |
# Preprocess and run the Trellis pipeline with fixed sampler settings
|
798 |
try:
|
|
|
799 |
processed_image = TRELLIS_PIPELINE.preprocess_image(resized_image, max_resolution=model_resolution)
|
800 |
outputs = TRELLIS_PIPELINE.run(
|
801 |
processed_image,
|
@@ -930,7 +1056,7 @@ examples = [["assets//examples//hex_map_p1.png", 32, 1, 0, 0, 0, 0, 0, "#ede9ac4
|
|
930 |
gr.set_static_paths(paths=["images/","images/images","images/prerendered","LUT/","fonts/","assets/"])
|
931 |
|
932 |
# Gradio Blocks Interface
|
933 |
-
with gr.Blocks(css_paths="
|
934 |
with gr.Row():
|
935 |
gr.Markdown("""
|
936 |
# HexaGrid Creator
|
@@ -987,7 +1113,8 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
987 |
key="imgInput",
|
988 |
image_mode=None,
|
989 |
format="PNG",
|
990 |
-
height=
|
|
|
991 |
)
|
992 |
with gr.Accordion("Sketch Pad", open = False, elem_id="sketchpd"):
|
993 |
with gr.Row():
|
@@ -997,23 +1124,21 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
997 |
#invert_colors=True,
|
998 |
#sources=['upload','canvas'],
|
999 |
#tool=['editor','select','color-sketch'],
|
1000 |
-
placeholder="Draw a sketch or upload an image.
|
1001 |
interactive=True,
|
1002 |
elem_classes="centered solid imgcontainer",
|
1003 |
key="imgSketch",
|
1004 |
image_mode="RGBA",
|
1005 |
format="PNG",
|
1006 |
-
brush=gr.Brush()
|
|
|
1007 |
)
|
1008 |
with gr.Row():
|
1009 |
with gr.Column(scale=1):
|
1010 |
-
sketch_replace_input_image_button = gr.Button(
|
1011 |
-
"Replace Input Image with sketch",
|
1012 |
-
elem_id="sketch_replace_input_image_button",
|
1013 |
-
elem_classes="solid"
|
1014 |
-
)
|
1015 |
sketch_alpha_composite_slider = gr.Slider(0,100,50,0.5, label="Sketch Transparancy", elem_id="alpha_composite_slider")
|
1016 |
btn_sketch_alpha_composite = gr.Button("Overlay Sketch on Input Image", elem_id="btn_sketchninput", elem_classes="solid")
|
|
|
1017 |
|
1018 |
with gr.Column():
|
1019 |
with gr.Accordion("Hex Coloring and Exclusion", open = False):
|
@@ -1063,7 +1188,8 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1063 |
examples=[[f] for f in constants.lut_files],
|
1064 |
inputs=[lut_filename],
|
1065 |
outputs=[lut_filename],
|
1066 |
-
label="Select a Filter (LUT) file. Populate the LUT File Name field"
|
|
|
1067 |
)
|
1068 |
|
1069 |
with gr.Row():
|
@@ -1093,8 +1219,20 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1093 |
value="Cossale/Frames2-Flex.1",
|
1094 |
elem_classes="solid",
|
1095 |
elem_id="inference_model",
|
|
|
1096 |
visible=False
|
1097 |
)
|
1098 |
# Update map_options to a Dropdown with choices from constants.PROMPTS keys
|
1099 |
with gr.Row():
|
1100 |
with gr.Column():
|
@@ -1155,7 +1293,8 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1155 |
with gr.Row():
|
1156 |
with gr.Column(scale=2):
|
1157 |
# Gallery from PRE_RENDERED_IMAGES GOES HERE
|
1158 |
-
prerendered_image_gallery = gr.Gallery(label="Image Gallery", show_label=True, value=build_prerendered_images_by_quality(3,'thumbnail'), elem_id="gallery",
|
|
|
1159 |
with gr.Column():
|
1160 |
image_guidance_stength = gr.Slider(label="Image Guidance Strength (prompt percentage)", minimum=0, maximum=1.0, value=0.85, step=0.01, interactive=True)
|
1161 |
replace_input_image_button = gr.Button(
|
@@ -1180,14 +1319,14 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1180 |
y_spacing = gr.Number(label="Adjust Vertical spacing", value=3, minimum=-200, maximum=200, precision=1)
|
1181 |
with gr.Row():
|
1182 |
rotation = gr.Slider(-90, 180, 0.0, 0.1, label="Hexagon Rotation (degree)")
|
1183 |
-
add_hex_text = gr.Dropdown(label="Add Text to Hexagons", choices=[None, "Row-Column Coordinates", "Sequential Numbers", "Playing Cards Sequential", "Playing Cards Alternate Red and Black", "Custom List"], value=None)
|
1184 |
with gr.Row():
|
1185 |
custom_text_list = gr.TextArea(label="Custom Text List", value=constants.cards_alternating, visible=False,)
|
1186 |
custom_text_color_list = gr.TextArea(label="Custom Text Color List", value=constants.card_colors_alternating, visible=False)
|
1187 |
with gr.Row():
|
1188 |
hex_text_info = gr.Markdown("""
|
1189 |
### Text Color uses the Border Color and Border Opacity, unless you use a custom list.
|
1190 |
-
### The Custom Text List and Custom Text Color List are comma separated lists.
|
1191 |
### The custom color list is a comma separated list of hex colors.
|
1192 |
#### Example: "A,2,3,4,5,6,7,8,9,10,J,Q,K", "red,#0000FF,#00FF00,red,#FFFF00,#00FFFF,#FF8000,#FF00FF,#FF0080,#FF8000,#FF0080,lightblue"
|
1193 |
""", elem_id="hex_text_info", visible=False)
|
@@ -1230,8 +1369,8 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1230 |
add_border_button = gr.Button("Add Margins", elem_classes="solid", variant="secondary")
|
1231 |
with gr.Row():
|
1232 |
bordered_image_output = gr.Image(label="Image with Margins", image_mode="RGBA", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgBordered",interactive=False, show_download_button=True, show_fullscreen_button=True, show_share_button=True)
|
1233 |
-
|
1234 |
-
with
|
1235 |
with gr.Row():
|
1236 |
depth_image_source = gr.Radio(
|
1237 |
label="Depth Image Source",
|
@@ -1249,7 +1388,7 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1249 |
video_resolution = gr.Slider(384, 768, value=480, step=32, label="Video Resolution (*danger*)", interactive=True)
|
1250 |
model_resolution = gr.Slider(512, 2304, value=1024, step=64, label="3D Model Resolution", interactive=True)
|
1251 |
with gr.Row():
|
1252 |
-
generate_3d_asset_button = gr.Button("Generate 3D Asset", elem_classes="solid", variant="secondary")
|
1253 |
with gr.Row():
|
1254 |
depth_output = gr.Image(label="Depth Map", image_mode="L", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="DepthOutput",interactive=False, show_download_button=True, show_fullscreen_button=True, show_share_button=True, height=400)
|
1255 |
with gr.Row():
|
@@ -1310,7 +1449,7 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1310 |
)
|
1311 |
generate_input_image.click(
|
1312 |
fn=generate_input_image_click,
|
1313 |
-
inputs=[input_image,map_options, prompt_textbox, negative_prompt_textbox, model_textbox, randomize_seed, seed_slider, gr.State(False),
|
1314 |
outputs=[input_image, seed_slider], scroll_to_output=True
|
1315 |
).then(
|
1316 |
fn=update_sketch_dimensions,
|
@@ -1350,6 +1489,14 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1350 |
inputs=model_options,
|
1351 |
outputs=prompt_notes_label
|
1352 |
)
|
1353 |
composite_button.click(
|
1354 |
fn=lambda input_image, composite_color, composite_opacity: gr.Warning("Please upload an Input Image to get started.") if input_image is None else change_color(input_image, composite_color, composite_opacity),
|
1355 |
inputs=[input_image, composite_color, composite_opacity],
|
@@ -1359,7 +1506,7 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1359 |
#use conditioned_image as the input_image for generate_input_image_click
|
1360 |
generate_input_image_from_gallery.click(
|
1361 |
fn=generate_input_image_click,
|
1362 |
-
inputs=[input_image, map_options, prompt_textbox, negative_prompt_textbox, model_textbox,randomize_seed, seed_slider, gr.State(True), image_guidance_stength, image_size_ratio],
|
1363 |
outputs=[input_image, seed_slider], scroll_to_output=True
|
1364 |
).then(
|
1365 |
fn=update_sketch_dimensions,
|
@@ -1414,7 +1561,18 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
|
|
1414 |
# inputs=[depth_image_source, resized_width_slider, z_scale_slider, input_image, output_image, overlay_image, bordered_image_output],
|
1415 |
# outputs=[depth_map_output, model_output, model_file], scroll_to_output=True
|
1416 |
# )
|
1417 |
-
|
1418 |
# Chain the buttons
|
1419 |
generate_3d_asset_button.click(
|
1420 |
fn=generate_3d_asset_part1,
|
@@ -1470,14 +1628,23 @@ if __name__ == "__main__":
|
|
1470 |
|
1471 |
#-------------- ------------------------------------------------MODEL INITIALIZATION------------------------------------------------------------#
|
1472 |
# Load models once during module import
|
1473 |
-
|
1474 |
-
|
1475 |
-
|
1476 |
-
|
1477 |
-
|
1478 |
-
|
1479 |
-
|
1480 |
-
|
1481 |
hexaGrid.queue(default_concurrency_limit=1,max_size=12,api_open=False)
|
1482 |
hexaGrid.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered", 'e:/TMP'], favicon_path="./assets/favicon.ico", max_file_size="10mb")
|
1483 |
|
|
|
11 |
from easydict import EasyDict as edict
|
12 |
import utils.constants as constants
|
13 |
from haishoku.haishoku import Haishoku
|
14 |
+
from tqdm import tqdm
|
15 |
|
16 |
from tempfile import NamedTemporaryFile
|
17 |
import atexit
|
18 |
import random
|
19 |
+
import accelerate
|
20 |
from transformers import AutoTokenizer, DPTImageProcessor, DPTForDepthEstimation
|
21 |
from trellis.pipelines import TrellisImageTo3DPipeline
|
22 |
from trellis.representations import Gaussian, MeshExtractResult
|
|
|
41 |
get_filename,
|
42 |
pause,
|
43 |
convert_ratio_to_dimensions,
|
44 |
+
update_dimensions_on_ratio,
|
45 |
get_seed,
|
46 |
+
get_output_name
|
47 |
) #install_cuda_toolkit,install_torch, _get_output, setup_runtime_env)
|
48 |
|
49 |
from utils.image_utils import (
|
50 |
change_color,
|
51 |
open_image,
|
|
|
52 |
upscale_image,
|
53 |
lerp_imagemath,
|
54 |
shrink_and_paste_on_blank,
|
|
|
57 |
multiply_and_blend_images,
|
58 |
alpha_composite_with_control,
|
59 |
crop_and_resize_image,
|
60 |
+
resize_and_crop_image,
|
61 |
convert_to_rgba_png,
|
62 |
resize_image_with_aspect_ratio,
|
63 |
build_prerendered_images_by_quality,
|
64 |
+
get_image_from_dict,
|
65 |
+
calculate_optimal_fill_dimensions
|
66 |
)
|
67 |
|
68 |
from utils.hex_grid import (
|
|
|
85 |
|
86 |
from utils.lora_details import (
|
87 |
upd_prompt_notes,
|
88 |
+
upd_prompt_notes_by_index,
|
89 |
split_prompt_precisely,
|
90 |
approximate_token_count,
|
91 |
+
get_trigger_words,
|
92 |
+
is_lora_loaded,
|
93 |
+
get_lora_models
|
94 |
+
)
|
95 |
+
from diffusers import (
|
96 |
+
FluxPipeline,
|
97 |
+
FluxImg2ImgPipeline,
|
98 |
+
FluxControlPipeline,
|
99 |
+
FluxControlPipeline,
|
100 |
+
DiffusionPipeline,
|
101 |
+
AutoencoderTiny,
|
102 |
+
AutoencoderKL
|
103 |
)
|
|
|
104 |
|
105 |
PIPELINE_CLASSES = {
|
106 |
"FluxPipeline": FluxPipeline,
|
107 |
"FluxImg2ImgPipeline": FluxImg2ImgPipeline,
|
108 |
+
"FluxControlPipeline": FluxControlPipeline,
|
109 |
+
"FluxFillPipeline": FluxControlPipeline
|
110 |
}
|
111 |
|
112 |
from utils.version_info import (
|
|
|
118 |
#from utils.depth_estimation import (get_depth_map_from_state)
|
119 |
|
120 |
input_image_palette = []
|
121 |
+
current_prerendered_image = gr.State("./images/Beeuty-1.png")
|
122 |
user_dir = constants.TMPDIR
|
123 |
+
lora_models = get_lora_models()
|
124 |
+
selected_index = gr.State(value=-1)
|
125 |
+
|
126 |
+
image_processor: Optional[DPTImageProcessor] = None
|
127 |
+
depth_model: Optional[DPTForDepthEstimation] = None
|
128 |
+
TRELLIS_PIPELINE: Optional[TrellisImageTo3DPipeline] = None
|
129 |
+
pipe: Optional[Union[FluxPipeline, FluxImg2ImgPipeline, FluxControlPipeline]] = None
|
130 |
|
131 |
# Register the cleanup function
|
132 |
atexit.register(cleanup_temp_files)
|
|
|
231 |
default_model = model_textbox
|
232 |
return default_model, []
|
233 |
|
234 |
+
def set_pipeline(
|
235 |
+
model_name="black-forest-labs/FLUX.1-dev",
|
236 |
+
lora_weights=None,
|
237 |
+
pipeline_name="FluxPipeline",
|
238 |
+
progress=gr.Progress(track_tqdm=True)
|
239 |
+
):
|
240 |
+
global pipe
|
241 |
+
pbar = tqdm(total=7, desc="Pipeline and Model Load")
|
242 |
+
current_pipeline_name =pipe.name_or_path if pipe else None
|
243 |
+
current_pipeline_class = type(pipe).__name__ if pipe else None
|
244 |
+
if (current_pipeline_name != model_name) or (pipeline_name != current_pipeline_class):
|
245 |
+
pipe = None
|
246 |
+
gc.collect()
|
247 |
+
#from torch import cuda, bfloat16, float32, Generator, no_grad, backends
|
248 |
+
# Retrieve the pipeline class from the mapping
|
249 |
+
pipeline_class = PIPELINE_CLASSES.get(pipeline_name)
|
250 |
+
if not pipeline_class:
|
251 |
+
raise ValueError(f"Unsupported pipeline type '{pipeline_name}'. "
|
252 |
+
f"Available options: {list(PIPELINE_CLASSES.keys())}")
|
253 |
+
|
254 |
+
#initialize_cuda()
|
255 |
+
dvc = "cpu"
|
256 |
+
#from src.condition import Condition
|
257 |
+
pbar.update(1)
|
258 |
+
print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
|
259 |
+
#print(f"\n {get_torch_info()}\n")
|
260 |
+
# Initialize the pipeline inside the context manager
|
261 |
+
pipe = pipeline_class.from_pretrained(
|
262 |
+
model_name,
|
263 |
+
torch_dtype=torch.bfloat16 if device == "cuda" else torch.float16,
|
264 |
+
vae=good_vae
|
265 |
+
)
|
266 |
+
pbar.update(2)
|
267 |
+
pipe.to(dvc)
|
268 |
+
# Optionally, don't use CPU offload if not necessary
|
269 |
+
pbar.update(1)
|
270 |
+
|
271 |
+
# Access the tokenizer from the pipeline
|
272 |
+
tokenizer = pipe.tokenizer
|
273 |
+
|
274 |
+
# Check if add_prefix_space is set and convert to slow tokenizer if necessary
|
275 |
+
if getattr(tokenizer, 'add_prefix_space', False):
|
276 |
+
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True, device_map = 'cpu')
|
277 |
+
# Update the pipeline's tokenizer
|
278 |
+
pipe.tokenizer = tokenizer
|
279 |
+
|
280 |
+
pbar.set_description("Loading LoRA weights")
|
281 |
+
pbar.update(1)
|
282 |
+
pipe.unload_lora_weights()
|
283 |
+
|
284 |
+
# Load LoRA weights
|
285 |
+
# note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
|
286 |
+
if lora_weights:
|
287 |
+
for lora_weight in lora_weights:
|
288 |
+
lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
|
289 |
+
lora_weight_set = False
|
290 |
+
if lora_configs:
|
291 |
+
for config in lora_configs:
|
292 |
+
# Load LoRA weights with optional weight_name and adapter_name
|
293 |
+
if 'weight_name' in config:
|
294 |
+
weight_name = config.get("weight_name")
|
295 |
+
adapter_name = config.get("adapter_name")
|
296 |
+
lora_collection = config.get("lora_collection")
|
297 |
+
if weight_name and adapter_name and lora_collection and lora_weight_set == False:
|
298 |
+
pipe.load_lora_weights(
|
299 |
+
lora_collection,
|
300 |
+
weight_name=weight_name,
|
301 |
+
adapter_name=adapter_name,
|
302 |
+
token=constants.HF_API_TOKEN
|
303 |
+
)
|
304 |
+
lora_weight_set = True
|
305 |
+
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
|
306 |
+
elif weight_name and adapter_name==None and lora_collection and lora_weight_set == False:
|
307 |
+
pipe.load_lora_weights(
|
308 |
+
lora_collection,
|
309 |
+
weight_name=weight_name,
|
310 |
+
token=constants.HF_API_TOKEN
|
311 |
+
)
|
312 |
+
lora_weight_set = True
|
313 |
+
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
|
314 |
+
elif weight_name and adapter_name and lora_weight_set == False:
|
315 |
+
pipe.load_lora_weights(
|
316 |
+
lora_weight,
|
317 |
+
weight_name=weight_name,
|
318 |
+
adapter_name=adapter_name,
|
319 |
+
token=constants.HF_API_TOKEN
|
320 |
+
)
|
321 |
+
lora_weight_set = True
|
322 |
+
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
|
323 |
+
elif weight_name and adapter_name==None and lora_weight_set == False:
|
324 |
+
pipe.load_lora_weights(
|
325 |
+
lora_weight,
|
326 |
+
weight_name=weight_name,
|
327 |
+
token=constants.HF_API_TOKEN
|
328 |
+
)
|
329 |
+
lora_weight_set = True
|
330 |
+
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
|
331 |
+
elif lora_weight_set == False:
|
332 |
+
pipe.load_lora_weights(
|
333 |
+
lora_weight,
|
334 |
+
token=constants.HF_API_TOKEN
|
335 |
+
)
|
336 |
+
lora_weight_set = True
|
337 |
+
print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
|
338 |
+
# Apply 'pipe' configurations if present
|
339 |
+
if 'pipe' in config:
|
340 |
+
pipe_config = config['pipe']
|
341 |
+
for method_name, params in pipe_config.items():
|
342 |
+
method = getattr(pipe, method_name, None)
|
343 |
+
if method:
|
344 |
+
print(f"Applying pipe method: {method_name} with params: {params}")
|
345 |
+
method(**params)
|
346 |
+
else:
|
347 |
+
print(f"Method {method_name} not found in pipe.")
|
348 |
+
if 'condition_type' in config:
|
349 |
+
condition_type = config['condition_type']
|
350 |
+
if condition_type == "coloring":
|
351 |
+
#pipe.enable_coloring()
|
352 |
+
print("\nEnabled coloring.\n")
|
353 |
+
elif condition_type == "deblurring":
|
354 |
+
#pipe.enable_deblurring()
|
355 |
+
print("\nEnabled deblurring.\n")
|
356 |
+
elif condition_type == "fill":
|
357 |
+
#pipe.enable_fill()
|
358 |
+
print("\nEnabled fill.\n")
|
359 |
+
elif condition_type == "depth":
|
360 |
+
#pipe.enable_depth()
|
361 |
+
print("\nEnabled depth.\n")
|
362 |
+
elif condition_type == "canny":
|
363 |
+
#pipe.enable_canny()
|
364 |
+
print("\nEnabled canny.\n")
|
365 |
+
elif condition_type == "subject":
|
366 |
+
#pipe.enable_subject()
|
367 |
+
print("\nEnabled subject.\n")
|
368 |
+
else:
|
369 |
+
print(f"Condition type {condition_type} not implemented.")
|
370 |
+
else:
|
371 |
+
pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
|
372 |
+
pbar.set_description("Pipe Loaded.")
|
373 |
+
pbar.set_postfix({"Status": "Done"})
|
374 |
+
pbar.update(1)
|
375 |
+
pbar.close()
|
376 |
|
|
|
|
|
|
|
377 |
|
378 |
@spaces.GPU(duration=200, progress=gr.Progress(track_tqdm=True))
|
379 |
def generate_image_lowmem(
|
|
|
382 |
model_name="black-forest-labs/FLUX.1-dev",
|
383 |
lora_weights=None,
|
384 |
conditioned_image=None,
|
385 |
+
mask_image=None,
|
386 |
image_width=1368,
|
387 |
image_height=848,
|
388 |
guidance_scale=3.5,
|
|
|
394 |
additional_parameters=None,
|
395 |
progress=gr.Progress(track_tqdm=True)
|
396 |
):
397 |
with torch.no_grad():
|
398 |
+
#global pipe
|
399 |
+
global device
|
400 |
+
pipe.to(device)
|
401 |
+
flash_attention_enabled = torch.backends.cuda.flash_sdp_enabled()
|
402 |
+
if flash_attention_enabled == False:
|
403 |
+
#Enable xFormers memory-efficient attention (optional)
|
404 |
+
#pipe.enable_xformers_memory_efficient_attention()
|
405 |
+
print("\nEnabled xFormers memory-efficient attention.\n")
|
406 |
+
else:
|
407 |
+
pipe.attn_implementation="flash_attention_2"
|
408 |
+
print("\nEnabled flash_attention_2.\n")
|
409 |
# alternative version that may be more efficient
|
410 |
# pipe.enable_sequential_cpu_offload()
|
411 |
if pipeline_name == "FluxPipeline":
|
|
|
415 |
else:
|
416 |
pipe.enable_model_cpu_offload()
|
417 |
|
|
|
|
|
418 |
|
419 |
+
mask_parameters = {}
|
420 |
+
# Load the mask image if provided
|
421 |
+
if (pipeline_name == "FluxFillPipeline"):
|
422 |
+
mask_image = open_image(mask_image).convert("RGBA")
|
423 |
+
mask_condition_type = constants.condition_type[5]
|
424 |
+
guidance_scale = 30
|
425 |
+
num_inference_steps=50
|
426 |
+
max_sequence_length=512
|
427 |
+
print(f"\nAdded mask image.\n {mask_image.size}")
|
428 |
+
mask_parameters ={
|
429 |
+
"mask_image": mask_image,
|
430 |
+
}
|
|
|
|
|
|
|
431 |
432 |
# Set the random seed for reproducibility
|
433 |
generator = torch.Generator(device=device).manual_seed(seed)
|
434 |
+
#conditions = []
|
435 |
if conditioned_image is not None:
|
436 |
+
conditioned_image = resize_and_crop_image(conditioned_image, image_width, image_height)
|
437 |
+
#condition = Condition(constants.condition_type[2], conditioned_image)
|
438 |
+
#conditions.append(condition)
|
439 |
print(f"\nAdded conditioned image.\n {conditioned_image.size}")
|
440 |
# Prepare the parameters for image generation
|
441 |
additional_parameters ={
|
442 |
"strength": strength,
|
443 |
"image": conditioned_image,
|
444 |
}
|
445 |
+
additional_parameters.update(mask_parameters)
|
446 |
else:
|
447 |
print("\nNo conditioned image provided.")
|
448 |
if neg_prompt!=None:
|
|
|
456 |
prompt, prompt2 = split_prompt_precisely(text)
|
457 |
prompt_parameters = {
|
458 |
"prompt" : prompt,
|
459 |
+
"prompt_2": prompt2,
|
460 |
}
|
461 |
else:
|
462 |
prompt_parameters = {
|
463 |
+
"prompt" :text,
|
464 |
}
|
465 |
additional_parameters.update(prompt_parameters)
|
466 |
# Combine all parameters
|
|
|
469 |
"width": image_width,
|
470 |
"guidance_scale": guidance_scale,
|
471 |
"num_inference_steps": num_inference_steps,
|
472 |
+
"generator": generator,
|
473 |
+
}
|
474 |
if additional_parameters:
|
475 |
generate_params.update(additional_parameters)
|
476 |
generate_params = {k: v for k, v in generate_params.items() if v is not None}
|
477 |
print(f"generate_params: {generate_params}")
|
478 |
# Generate the image
|
479 |
+
try:
|
480 |
+
result = pipe(**generate_params) #generate_image(pipe,generate_params)
|
481 |
+
image = result.images[0]
|
482 |
+
# Clean up
|
483 |
+
del result
|
484 |
+
except Exception as e:
|
485 |
+
print(f"Error generating image: {e}")
|
486 |
+
image = open_image("./images/Beeuty-1.png")
|
487 |
+
#del conditions
|
488 |
+
del generator
|
489 |
+
# Move the pipeline and clear cache
|
490 |
+
pipe.to("cpu")
|
491 |
torch.cuda.empty_cache()
|
492 |
torch.cuda.ipc_collect()
|
493 |
print(torch.cuda.memory_summary(device=None, abbreviated=False))
|
494 |
+
gc.collect()
|
495 |
return image
|
496 |
|
497 |
def generate_ai_image_local (
|
|
|
501 |
model="black-forest-labs/FLUX.1-dev",
|
502 |
lora_weights=None,
|
503 |
conditioned_image=None,
|
504 |
+
mask_image=None,
|
505 |
height=512,
|
506 |
width=912,
|
507 |
num_inference_steps=30,
|
|
|
510 |
pipeline_name="FluxPipeline",
|
511 |
strength=0.75,
|
512 |
progress=gr.Progress(track_tqdm=True)
|
513 |
+
):
|
514 |
print(f"Generating image with lowmem")
|
515 |
try:
|
516 |
if map_option != "Prompt":
|
|
|
553 |
print(f"Conditioned Image: {conditioned_image}")
|
554 |
print(f"Conditioned Image Strength: {strength}")
|
555 |
print(f"pipeline: {pipeline_name}")
|
556 |
+
set_pipeline(
|
557 |
+
model_name=model,
|
558 |
+
lora_weights=lora_weights,
|
559 |
+
pipeline_name=pipeline_name,
|
560 |
+
progress=progress
|
561 |
+
)
|
562 |
image = generate_image_lowmem(
|
563 |
text=prompt,
|
564 |
model_name=model,
|
565 |
neg_prompt=negative_prompt,
|
566 |
lora_weights=lora_weights,
|
567 |
conditioned_image=conditioned_image,
|
568 |
+
mask_image=mask_image,
|
569 |
image_width=width,
|
570 |
image_height=height,
|
571 |
guidance_scale=guidance_scale,
|
|
|
573 |
seed=seed,
|
574 |
pipeline_name=pipeline_name,
|
575 |
strength=strength,
|
576 |
+
additional_parameters=additional_parameters,
|
577 |
+
progress=progress
|
578 |
)
|
579 |
with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
|
580 |
image.save(tmp.name, format="PNG")
|
|
|
583 |
return tmp.name
|
584 |
except Exception as e:
|
585 |
print(f"Error generating AI image: {e}")
|
586 |
+
gc.collect()
|
587 |
return None
|
588 |
|
589 |
|
590 |
+
def generate_input_image_click(image_input, map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, randomize_seed=True, seed=None, use_conditioned_image=False, mask_image=None, strength=0.5, image_format="16:9", scale_factor=constants.SCALE_FACTOR, progress=gr.Progress(track_tqdm=True)):
|
591 |
seed = get_seed(randomize_seed, seed)
|
592 |
|
593 |
# Get the model and LoRA weights
|
|
|
604 |
print(f"Conditioned Image: {conditioned_image.size}.. converted to RGB\n")
|
605 |
# use image_input as the conditioned_image if it is not None
|
606 |
elif image_input is not None:
|
607 |
+
file_path, is_dict = get_image_from_dict(image_input)
|
608 |
+
conditioned_image = open_image(file_path).convert("RGB")
|
609 |
+
print(f"Conditioned Image set to modify Input Image!\nClear to generate new image from layered image: {is_dict}\n")
|
610 |
+
gr.Info(f"Conditioned Image set to modify Input Image! Clear to generate new image. Layered: {is_dict}",duration=5)
|
611 |
|
612 |
# Convert image_format from a string split by ":" into two numbers divided
|
613 |
width_ratio, height_ratio = map(int, image_format.split(":"))
|
614 |
aspect_ratio = width_ratio / height_ratio
|
615 |
|
616 |
+
width, height = convert_ratio_to_dimensions(aspect_ratio, constants.BASE_HEIGHT)
|
617 |
pipeline = "FluxPipeline"
|
618 |
if conditioned_image is not None:
|
619 |
pipeline = "FluxImg2ImgPipeline"
|
620 |
+
|
621 |
+
if (model == "black-forest-labs/FLUX.1-Fill-dev"):
|
622 |
+
pipeline = "FluxFillPipeline"
|
623 |
+
width, height = calculate_optimal_fill_dimensions(conditioned_image)
|
624 |
+
mask_image = get_image_from_dict(mask_image)
|
625 |
+
print(f"Optimal Dimensions: {width} x {height} \n")
|
626 |
+
else:
|
627 |
+
mask_image = None
|
628 |
+
|
629 |
# Generate the AI image and get the image path
|
630 |
image_path = generate_ai_image_local(
|
631 |
map_option,
|
|
|
634 |
model,
|
635 |
lora_weights,
|
636 |
conditioned_image,
|
637 |
+
mask_image,
|
638 |
strength=strength,
|
639 |
height=height,
|
640 |
width=width,
|
|
|
672 |
def update_prompt_notes(model_textbox_value):
|
673 |
return upd_prompt_notes(model_textbox_value)
|
674 |
|
675 |
+
def update_selection(evt: gr.SelectData, aspect_ratio):
|
676 |
+
selected_lora = constants.LORAS[evt.index]
|
677 |
+
new_placeholder = f"Type a prompt for {selected_lora['title']}"
|
678 |
+
new_aspect_ratio = aspect_ratio # default to the currently selected aspect ratio
|
679 |
+
lora_repo = selected_lora["repo"]
|
680 |
+
#updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
|
681 |
+
# If the selected LoRA model specifies an aspect ratio, use it to update dimensions.
|
682 |
+
if "aspect" in selected_lora:
|
683 |
+
try:
|
684 |
+
new_aspect_ratio = selected_lora["aspect"]
|
685 |
+
# Recalculate dimensions using constants.BASE_HEIGHT as the height reference.
|
686 |
+
new_width, new_height = update_dimensions_on_ratio(new_aspect_ratio, constants.BASE_HEIGHT)
|
687 |
+
# (Optionally, you could log or use new_width/new_height as needed)
|
688 |
+
except Exception as e:
|
689 |
+
print(f"\nError in update selection aspect ratios: {e}\nSkipping")
|
690 |
+
return [gr.update(value=lora_repo), gr.update(value=lora_repo), evt.index, new_aspect_ratio, upd_prompt_notes_by_index(evt.index)]
|
691 |
+
|
692 |
def on_prerendered_gallery_selection(event_data: gr.SelectData):
|
693 |
global current_prerendered_image
|
694 |
selected_index = event_data.index
|
|
|
725 |
sk_img_path, _ = get_image_from_dict(sketch_image)
|
726 |
sk_img = open_image(sk_img_path)
|
727 |
# Resize sketch image if dimensions don't match input image.
|
728 |
+
if (in_img) and (in_img.size != sk_img.size):
|
729 |
sk_img = sk_img.resize(in_img.size, Image.LANCZOS)
|
730 |
+
return [sk_img, gr.update(width=in_img.width, height=in_img.height)]
|
731 |
+
else:
|
732 |
+
return [sk_img, gr.update()]
|
733 |
+
|
734 |
|
735 |
def composite_with_control_sync(input_image, sketch_image, slider_value):
|
736 |
# Load the images using open_image() if they are provided as file paths.
|
|
|
751 |
sketch, is_dict = get_image_from_dict(sketch_image)
|
752 |
return sketch
|
753 |
####################################### DEPTH ESTIMATION #######################################
|
754 |
+
def load_3d_models(is_open: bool = True) -> bool:
|
755 |
+
if is_open:
|
756 |
+
gr.Info("Loading 3D models...")
|
757 |
+
global image_processor, depth_model, TRELLIS_PIPELINE
|
758 |
+
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
|
759 |
+
depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large", ignore_mismatched_sizes=True)
|
760 |
+
TRELLIS_PIPELINE = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
|
761 |
+
try:
|
762 |
+
# Preload with a dummy image to finalize initialization
|
763 |
+
TRELLIS_PIPELINE.preprocess_image(Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8)))
|
764 |
+
except Exception as e:
|
765 |
+
print(f"Error preloading TRELLIS_PIPELINE: {e}")
|
766 |
+
print("3D models loaded")
|
767 |
+
gr.Info("3D models loaded.")
|
768 |
+
return gr.update(interactive = is_open)
|
769 |
+
|
770 |
+
def unload_3d_models(is_open: bool = False) -> bool:
|
771 |
+
if not is_open:
|
772 |
+
gr.Info("Unloading 3D models...")
|
773 |
+
global image_processor, depth_model, TRELLIS_PIPELINE
|
774 |
+
TRELLIS_PIPELINE.to("cpu")
|
775 |
+
del image_processor
|
776 |
+
del depth_model
|
777 |
+
del TRELLIS_PIPELINE
|
778 |
+
#torch.cuda.empty_cache()
|
779 |
+
#torch.cuda.ipc_collect()
|
780 |
+
gc.collect()
|
781 |
+
print("3D models unloaded and CUDA memory freed")
|
782 |
+
gr.Info("3D models unloaded.")
|
783 |
+
return gr.update(interactive = is_open)
|
784 |
|
785 |
def preprocess_image(image: Image.Image) -> Image.Image:
|
786 |
"""
|
|
|
921 |
depth_img = Image.open(depth_img).convert("RGBA")
|
922 |
# Preprocess and run the Trellis pipeline with fixed sampler settings
|
923 |
try:
|
924 |
+
TRELLIS_PIPELINE.to(device)
|
925 |
processed_image = TRELLIS_PIPELINE.preprocess_image(resized_image, max_resolution=model_resolution)
|
926 |
outputs = TRELLIS_PIPELINE.run(
|
927 |
processed_image,
|
|
|
1056 |
gr.set_static_paths(paths=["images/","images/images","images/prerendered","LUT/","fonts/","assets/"])
|
1057 |
|
1058 |
# Gradio Blocks Interface
|
1059 |
+
with gr.Blocks(css_paths="style_20250314.css", title=title, theme='Surn/beeuty',delete_cache=(21600,86400)) as hexaGrid:
|
1060 |
with gr.Row():
|
1061 |
gr.Markdown("""
|
1062 |
# HexaGrid Creator
|
|
|
1113 |
key="imgInput",
|
1114 |
image_mode=None,
|
1115 |
format="PNG",
|
1116 |
+
height=450,
|
1117 |
+
width=800
|
1118 |
)
|
1119 |
with gr.Accordion("Sketch Pad", open = False, elem_id="sketchpd"):
|
1120 |
with gr.Row():
|
|
|
1124 |
#invert_colors=True,
|
1125 |
#sources=['upload','canvas'],
|
1126 |
#tool=['editor','select','color-sketch'],
|
1127 |
+
placeholder="Draw a sketch or upload an image.",
|
1128 |
interactive=True,
|
1129 |
elem_classes="centered solid imgcontainer",
|
1130 |
key="imgSketch",
|
1131 |
image_mode="RGBA",
|
1132 |
format="PNG",
|
1133 |
+
brush=gr.Brush(),
|
1134 |
+
canvas_size=(640,360)
|
1135 |
)
|
1136 |
with gr.Row():
|
1137 |
with gr.Column(scale=1):
|
1138 |
+
sketch_replace_input_image_button = gr.Button("Replace Input Image with sketch", elem_id="sketch_replace_input_image_button", elem_classes="solid")
|
1139 |
sketch_alpha_composite_slider = gr.Slider(0,100,50,0.5, label="Sketch Transparancy", elem_id="alpha_composite_slider")
|
1140 |
btn_sketch_alpha_composite = gr.Button("Overlay Sketch on Input Image", elem_id="btn_sketchninput", elem_classes="solid")
|
1141 |
+
gr.Markdown("### Do Not add to image if using a fill model")
|
1142 |
|
1143 |
with gr.Column():
|
1144 |
with gr.Accordion("Hex Coloring and Exclusion", open = False):
|
|
|
1188 |
examples=[[f] for f in constants.lut_files],
|
1189 |
inputs=[lut_filename],
|
1190 |
outputs=[lut_filename],
|
1191 |
+
label="Select a Filter (LUT) file. Populate the LUT File Name field",
|
1192 |
+
examples_per_page = 15,
|
1193 |
)
|
1194 |
|
1195 |
with gr.Row():
|
|
|
1219 |
value="Cossale/Frames2-Flex.1",
|
1220 |
elem_classes="solid",
|
1221 |
elem_id="inference_model",
|
1222 |
+
lines=2,
|
1223 |
visible=False
|
1224 |
)
|
1225 |
+
with gr.Accordion("Choose Style Model*", open=False):
|
1226 |
+
lora_gallery = gr.Gallery(
|
1227 |
+
[(open_image(image_path), title) for image_path, title in lora_models],
|
1228 |
+
label="Styles",
|
1229 |
+
allow_preview=False, preview=False ,
|
1230 |
+
columns=2,
|
1231 |
+
elem_id="lora_gallery",
|
1232 |
+
show_share_button=False,
|
1233 |
+
elem_classes="solid", type="filepath",
|
1234 |
+
object_fit="contain", height="auto", format="png",
|
1235 |
+
)
|
1236 |
# Update map_options to a Dropdown with choices from constants.PROMPTS keys
|
1237 |
with gr.Row():
|
1238 |
with gr.Column():
|
|
|
1293 |
with gr.Row():
|
1294 |
with gr.Column(scale=2):
|
1295 |
# Gallery from PRE_RENDERED_IMAGES GOES HERE
|
1296 |
+
prerendered_image_gallery = gr.Gallery(label="Image Gallery", show_label=True, value=build_prerendered_images_by_quality(3,'thumbnail'), elem_id="gallery",
|
1297 |
+
elem_classes="solid", type="filepath", columns=[3], rows=[3], preview=False ,object_fit="contain", height="auto", format="png",allow_preview=False)
|
1298 |
with gr.Column():
|
1299 |
image_guidance_stength = gr.Slider(label="Image Guidance Strength (prompt percentage)", minimum=0, maximum=1.0, value=0.85, step=0.01, interactive=True)
|
1300 |
replace_input_image_button = gr.Button(
|
|
|
1319 |
y_spacing = gr.Number(label="Adjust Vertical spacing", value=3, minimum=-200, maximum=200, precision=1)
|
1320 |
with gr.Row():
|
1321 |
rotation = gr.Slider(-90, 180, 0.0, 0.1, label="Hexagon Rotation (degree)")
|
1322 |
+
add_hex_text = gr.Dropdown(label="Add Text to Hexagons", choices=[None, "Row-Column Coordinates", "Column Letter, Row Number", "Column Number, Row Letter", "Sequential Numbers", "Playing Cards Sequential", "Playing Cards Alternate Red and Black", "Custom List"], value=None)
|
1323 |
with gr.Row():
|
1324 |
custom_text_list = gr.TextArea(label="Custom Text List", value=constants.cards_alternating, visible=False,)
|
1325 |
custom_text_color_list = gr.TextArea(label="Custom Text Color List", value=constants.card_colors_alternating, visible=False)
|
1326 |
with gr.Row():
|
1327 |
hex_text_info = gr.Markdown("""
|
1328 |
### Text Color uses the Border Color and Border Opacity, unless you use a custom list.
|
1329 |
+
### The Custom Text List and Custom Text Color List are repeating comma separated lists.
|
1330 |
### The custom color list is a comma separated list of hex colors.
|
1331 |
#### Example: "A,2,3,4,5,6,7,8,9,10,J,Q,K", "red,#0000FF,#00FF00,red,#FFFF00,#00FFFF,#FF8000,#FF00FF,#FF0080,#FF8000,#FF0080,lightblue"
|
1332 |
""", elem_id="hex_text_info", visible=False)
|
|
|
1369 |
add_border_button = gr.Button("Add Margins", elem_classes="solid", variant="secondary")
|
1370 |
with gr.Row():
|
1371 |
bordered_image_output = gr.Image(label="Image with Margins", image_mode="RGBA", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgBordered",interactive=False, show_download_button=True, show_fullscreen_button=True, show_share_button=True)
|
1372 |
+
accordian_3d = gr.Accordion("Height Maps and 3D", open=False, elem_id="accordian_3d")
|
1373 |
+
with accordian_3d:
|
1374 |
with gr.Row():
|
1375 |
depth_image_source = gr.Radio(
|
1376 |
label="Depth Image Source",
|
|
|
1388 |
video_resolution = gr.Slider(384, 768, value=480, step=32, label="Video Resolution (*danger*)", interactive=True)
|
1389 |
          model_resolution = gr.Slider(512, 2304, value=1024, step=64, label="3D Model Resolution", interactive=True)
1390      with gr.Row():
1391 +        generate_3d_asset_button = gr.Button("Generate 3D Asset", elem_classes="solid", variant="secondary", interactive=False)
1392      with gr.Row():
1393          depth_output = gr.Image(label="Depth Map", image_mode="L", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="DepthOutput",interactive=False, show_download_button=True, show_fullscreen_button=True, show_share_button=True, height=400)
1394      with gr.Row():

1449      )
1450      generate_input_image.click(
1451          fn=generate_input_image_click,
1452 +        inputs=[input_image,map_options, prompt_textbox, negative_prompt_textbox, model_textbox, randomize_seed, seed_slider, gr.State(False), sketch_image, image_guidance_stength, image_size_ratio],
1453          outputs=[input_image, seed_slider], scroll_to_output=True
1454      ).then(
1455          fn=update_sketch_dimensions,

1489          inputs=model_options,
1490          outputs=prompt_notes_label
1491      )
1492 +    lora_gallery.select(
1493 +        fn=update_selection,
1494 +        inputs=[image_size_ratio],
1495 +        outputs=[model_textbox, model_options, gr.State(selected_index), image_size_ratio, prompt_notes_label]
1496 +    )
1497 +
1498 +    #################### model end ########################################
1499 +
1500      composite_button.click(
1501          fn=lambda input_image, composite_color, composite_opacity: gr.Warning("Please upload an Input Image to get started.") if input_image is None else change_color(input_image, composite_color, composite_opacity),
1502          inputs=[input_image, composite_color, composite_opacity],

1506      #use conditioned_image as the input_image for generate_input_image_click
1507      generate_input_image_from_gallery.click(
1508          fn=generate_input_image_click,
1509 +        inputs=[input_image, map_options, prompt_textbox, negative_prompt_textbox, model_textbox,randomize_seed, seed_slider, gr.State(True), sketch_image , image_guidance_stength, image_size_ratio],
1510          outputs=[input_image, seed_slider], scroll_to_output=True
1511      ).then(
1512          fn=update_sketch_dimensions,

1561      #     inputs=[depth_image_source, resized_width_slider, z_scale_slider, input_image, output_image, overlay_image, bordered_image_output],
1562      #     outputs=[depth_map_output, model_output, model_file], scroll_to_output=True
1563      # )
1564 +    accordian_3d.expand(
1565 +        fn=load_3d_models,
1566 +        trigger_mode="always_last",
1567 +        outputs=[generate_3d_asset_button],
1568 +        show_api=False
1569 +    )
1570 +    accordian_3d.collapse(
1571 +        fn=unload_3d_models,
1572 +        trigger_mode="always_last",
1573 +        outputs=[generate_3d_asset_button],
1574 +        show_api=False
1575 +    )
1576      # Chain the buttons
1577      generate_3d_asset_button.click(
1578          fn=generate_3d_asset_part1,

1628
1629      #-------------- ------------------------------------------------MODEL INITIALIZATION------------------------------------------------------------#
1630      # Load models once during module import
1631 +    dtype = torch.bfloat16
1632 +    device = "cuda" if torch.cuda.is_available() else "cpu"
1633 +    base_model = "black-forest-labs/FLUX.1-dev"
1634 +    good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
1635 +    #pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=good_vae).to(device)
1636 +    #pipe.enable_model_cpu_offload()
1637 +    #pipe.vae.enable_slicing()
1638 +    #pipe.attn_implementation="flash_attention_2"
1639 +
1640 +    # image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
1641 +    # depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large", ignore_mismatched_sizes=True)
1642 +    # TRELLIS_PIPELINE = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
1643 +    # TRELLIS_PIPELINE.to(device)
1644 +    # try:
1645 +    #     TRELLIS_PIPELINE.preprocess_image(Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))) # Preload rembg
1646 +    # except:
1647 +    #     pass
1648      hexaGrid.queue(default_concurrency_limit=1,max_size=12,api_open=False)
1649      hexaGrid.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered", 'e:/TMP'], favicon_path="./assets/favicon.ico", max_file_size="10mb")
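
The new accordian_3d.expand/.collapse wiring above is a lazy-load pattern: the 3D pipelines are only brought up when the 3D panel is opened, and the Generate button stays disabled until load_3d_models finishes. A minimal, self-contained sketch of the same pattern; load_heavy_models/unload_heavy_models are illustrative stand-ins for the app's load_3d_models/unload_3d_models:

import gradio as gr

def load_heavy_models():
    # Placeholder: load the depth / TRELLIS pipelines here, then enable the button.
    return gr.update(interactive=True)

def unload_heavy_models():
    # Placeholder: free the pipelines / VRAM here, then disable the button again.
    return gr.update(interactive=False)

with gr.Blocks() as demo:
    with gr.Accordion("Generate 3D Asset", open=False) as accordion_3d:
        generate_btn = gr.Button("Generate 3D Asset", interactive=False)
    # Models are loaded only when the accordion is opened and released when it is closed.
    accordion_3d.expand(fn=load_heavy_models, outputs=[generate_btn], show_api=False)
    accordion_3d.collapse(fn=unload_heavy_models, outputs=[generate_btn], show_api=False)

if __name__ == "__main__":
    demo.launch()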
|
images/prerendered/th/FLUX.1-Fill-dev.png
ADDED (Git LFS)

images/prerendered/th/FLUX.1-dev.png
ADDED (Git LFS)

images/prerendered/th/FLUX.1-schnell.png
ADDED (Git LFS)

images/prerendered/th/Flex.1-alpha.png
ADDED (Git LFS)
style_20250128.css → style_20250314.css
RENAMED

@@ -20,9 +20,15 @@
     background-color: rgba(242, 218, 163, 0.62);
 }
 
-.dark .gradio-container.gradio-container-5-
+.dark .gradio-container.gradio-container-5-21-0 .contain .intro .prose {
     background-color: rgba(41, 18, 5, 0.38) !important;
 }
+.toast-body.info {
+    background-color: rgba(242, 218, 163, 0.75);
+}
+.dark .toast-body.info {
+    background-color: rgba(128, 128, 128, 0.75);
+}
 
 .small {
     font-size: smaller !important;

@@ -72,26 +78,26 @@ a {
     max-width: 75px;
 }
 
-#gallery .thumbnails {
+#gallery .thumbnails, #lora_gallery .thumbnails {
     flex-direction: column !important;
     display: inline-flex !important;
     flex-wrap: wrap !important;
     position: relative !important;
 }
 
-#gallery caption.caption {
+#gallery caption.caption, #lora_gallery caption.caption {
     flex-direction: row !important;
     display: inline-flex !important;
     flex-wrap: wrap;
     white-space: unset !important;
 }
 
-#gallery .image-button img.with-caption {
+#gallery .image-button img.with-caption, #lora_gallery .image-button img.with-caption {
     object-fit: cover !important;
     object-position: center !important;
 }
 
-#gallery button.preview {
+#gallery button.preview, #lora_gallery button.preview {
     position: relative !important;
 }
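
These gallery rules are now duplicated onto #lora_gallery, so the new LoRA gallery only inherits this styling if the component declares that element id. A minimal sketch of the Gradio side of that wiring (the arguments shown are illustrative, not copied from app.py):

import gradio as gr

with gr.Blocks(css_paths=["style_20250314.css"]) as demo:
    # elem_id must match the stylesheet's #lora_gallery selectors for the thumbnail rules to apply.
    lora_gallery = gr.Gallery(label="LoRA Gallery", elem_id="lora_gallery", columns=4, height=400)

if __name__ == "__main__":
    demo.launch()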
utils/constants.py
CHANGED

@@ -62,6 +62,8 @@ if not HF_API_TOKEN:
 default_lut_example_img = "./LUT/daisy.jpg"
 MAX_SEED = np.iinfo(np.int32).max
 TARGET_SIZE = (2688,1536)
+BASE_HEIGHT = 576
+SCALE_FACTOR = (8/3)
 TMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
 os.makedirs(TMPDIR, exist_ok=True)

@@ -185,7 +187,7 @@ PRE_RENDERED_MAPS_JSON_LEVELS = {
 pre_rendered_maps_paths = [
     map_file['file'].replace("\\", "/")
-    for
+    for map_name, map_file in sorted(
         PRE_RENDERED_MAPS_JSON_LEVELS.items(),
         key=lambda x: (
             x[1]['quality'],

@@ -199,43 +201,59 @@ pre_rendered_maps_paths_api_file = [f"./gradio_api/file={map_path}" for map_path
 MODELS = [
     "black-forest-labs/FLUX.1-schnell",
     "black-forest-labs/FLUX.1-dev",
-    "ostris/Flex.1-alpha"
+    "ostris/Flex.1-alpha",
+    "black-forest-labs/FLUX.1-Fill-dev"
 ]
 # Available LoRA weights
 LORA_WEIGHTS = [
     "Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design",
+    "Shakker-Labs/FLUX.1-dev-LoRA-Micro-landscape-on-Mobile-Phone",
     "Borcherding/FLUX.1-dev-LoRA-FractalLand-v0.1",
     "Cossale/Frames2-Flex.1",
+    "AlekseyCalvin/HSTcolorFlexAlpha",
     "XLabs-AI/flux-lora-collection/anime_lora.safetensors",
     "XLabs-AI/flux-lora-collection/scenery_lora.safetensors",
     "XLabs-AI/flux-lora-collection/disney_lora.safetensors",
-    "XLabs-AI/flux-RealismLora"
+    "XLabs-AI/flux-RealismLora",
+    "strangerzonehf/Flux-Cute-3D-Kawaii-LoRA",
+    "SebastianBodza/flux_cute3D",
+    "gokaygokay/Flux-Seamless-Texture-LoRA",
+    "gokaygokay/Flux-Game-Assets-LoRA-v2",
 ]
 
 # Map each LoRA weight to its corresponding model
 LORA_TO_MODEL = {
     "Yuanshi/OminiControl": "black-forest-labs/FLUX.1-schnell",
     "Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design": "black-forest-labs/FLUX.1-dev",
+    "Shakker-Labs/FLUX.1-dev-LoRA-Micro-landscape-on-Mobile-Phone": "black-forest-labs/FLUX.1-dev",
     "Borcherding/FLUX.1-dev-LoRA-FractalLand-v0.1": "black-forest-labs/FLUX.1-dev",
     "Cossale/Frames2-Flex.1": "ostris/Flex.1-alpha",
     "AlekseyCalvin/HSTcolorFlexAlpha": "ostris/Flex.1-alpha",
     "XLabs-AI/flux-lora-collection/anime_lora.safetensors":"black-forest-labs/FLUX.1-dev",
     "XLabs-AI/flux-lora-collection/scenery_lora.safetensors":"black-forest-labs/FLUX.1-dev",
     "XLabs-AI/flux-lora-collection/disney_lora.safetensors":"black-forest-labs/FLUX.1-dev",
-    "XLabs-AI/flux-RealismLora":"black-forest-labs/FLUX.1-dev"
+    "XLabs-AI/flux-RealismLora":"black-forest-labs/FLUX.1-dev",
+    "strangerzonehf/Flux-Cute-3D-Kawaii-LoRA":"black-forest-labs/FLUX.1-dev",
+    "SebastianBodza/flux_cute3D":"black-forest-labs/FLUX.1-dev",
+    "gokaygokay/Flux-Seamless-Texture-LoRA":"black-forest-labs/FLUX.1-dev",
+    "gokaygokay/Flux-Game-Assets-LoRA-v2":"black-forest-labs/FLUX.1-dev",
 }
 condition_type = ["depth", "canny", "subject", "coloring", "deblurring", "fill", "redux"]
+
 # Detailed LoRA weight configurations
 LORA_DETAILS = {
     "AlekseyCalvin/HSTcolorFlexAlpha" : [
         {
-            "trigger_words": "(6kStepsCkpt)
+            "trigger_words": "(6kStepsCkpt)HST style analog autochrome photo ",
         },
         {
             "weight_name": "HSToricColorFlex_6000steps_LoRAforFluxOrFlex_32dim64alpha.safetensors",
         },
         {
-            "notes": "
+            "notes": "We use (6kStepsCkpt)HST style analog autochrome photo as trigger words."
+        },
+        {
+            "title": "HSToric Color Flex",
         },
         {
             "parameters" :{

@@ -243,7 +261,8 @@ LORA_DETAILS = {
             }
         },
         {
-            "thumbnail": ""
+            "thumbnail": "https://huggingface.co/AlekseyCalvin/HSTcolorFlexAlpha/resolve/main/images/example_gypubjtqx.png",
+            "show_in_gallery": True
         }
     ],
     "XLabs-AI/flux-lora-collection/anime_lora.safetensors":[

@@ -254,6 +273,9 @@ LORA_DETAILS = {
         {
             "trigger_words": "anime,",
         },
+        {
+            "title": "Anime",
+        },
         {
             "notes": "You should use ',anime' as trigger words at the end. "
         },

@@ -263,7 +285,8 @@ LORA_DETAILS = {
             }
         },
         {
-            "thumbnail": ""
+            "thumbnail": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
+            "show_in_gallery": True
         }
     ],
     "XLabs-AI/flux-lora-collection/scenery_lora.safetensors":[

@@ -274,6 +297,9 @@ LORA_DETAILS = {
         {
             "trigger_words": "scenery style,",
         },
+        {
+            "title": "Scenery",
+        },
         {
             "notes": "You should use ',scenery style' as trigger words at the end. "
         },

@@ -283,7 +309,8 @@ LORA_DETAILS = {
             }
         },
         {
-            "thumbnail": ""
+            "thumbnail": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_21.png?raw=true",
+            "show_in_gallery": True
         }
     ],
     "XLabs-AI/flux-lora-collection/disney_lora.safetensors":[

@@ -294,6 +321,9 @@ LORA_DETAILS = {
         {
             "trigger_words": "disney style,",
         },
+        {
+            "title": "Disney Style",
+        },
         {
             "notes": "You should use ',disney style' as trigger words at the end. "
         },

@@ -303,13 +333,17 @@ LORA_DETAILS = {
             }
         },
         {
-            "thumbnail": ""
+            "thumbnail": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_19.png?raw=true",
+            "show_in_gallery": True
         }
     ],
     "XLabs-AI/flux-RealismLora":[
         {
             "notes": "No trigger words but 8k, Animatrix illustration style, fantasy style, natural photo cinematic should all work "
         },
+        {
+            "title": "Realism",
+        },
         {
             "parameters" :{
                 "guidance_scale": "3.2",

@@ -318,7 +352,8 @@ LORA_DETAILS = {
             }
         },
         {
-            "thumbnail": ""
+            "thumbnail": "https://huggingface.co/VideoAditor/Flux-Lora-Realism/resolve/main/images/feel-the-difference-between-using-flux-with-lora-from-xlab-v0-j0ehybmvxehd1.png",
+            "show_in_gallery": True
         }
     ],
     "Cossale/Frames2-Flex.1": [

@@ -329,6 +364,9 @@ LORA_DETAILS = {
         {
             "trigger_words": "FRM$",
         },
+        {
+            "title": "Frames2-Flex.1",
+        },
         {
             "notes": " FRM$ used as trigger words. "
         },

@@ -338,7 +376,8 @@ LORA_DETAILS = {
             }
         },
         {
-            "thumbnail": ""
+            "thumbnail": "https://huggingface.co/Cossale/Frames2-Flex.1/resolve/main/samples/1737567472380__000005000_2.jpg",
+            "show_in_gallery": True
         }
     ],
     "Yuanshi/OminiControl": [

@@ -350,6 +389,9 @@ LORA_DETAILS = {
             "weight_name": "omini/subject_1024_beta.safetensors",
             "adapter_name": "subject_1024"
         },
+        {
+            "trigger_words": "omini,",
+        },
         {
             "parameters" :{
                 "num_inference_steps": "8",

@@ -360,7 +402,8 @@ LORA_DETAILS = {
             "notes": "Select an Image as a guide."
         },
         {
-            "thumbnail": ""
+            "thumbnail": "",
+            "show_in_gallery": False
         }
     ],
     "Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design": [

@@ -370,6 +413,9 @@ LORA_DETAILS = {
         {
             "trigger_words": "wablogo, logo, Minimalist ",
         },
+        {
+            "title": "Logo Design",
+        },
         {
             "pipe" :{
                 "fuse_lora": {"lora_scale":0.8}

@@ -381,7 +427,38 @@ LORA_DETAILS = {
             }
         },
         {
-            "thumbnail": ""
+            "thumbnail": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design/resolve/main/images/73e7db6a33550d05836ce285549de60075d05373c7b0660d631dac33.jpg",
+            "show_in_gallery": True
+        }
+    ],
+    "Shakker-Labs/FLUX.1-dev-LoRA-Micro-landscape-on-Mobile-Phone": [
+        {
+            "notes": "Uses miniature stereoscopic scene as trigger words.."
+        },
+        {
+            "trigger_words": "miniature stereoscopic scene ",
+        },
+        {
+            "title": "Micro Landscape",
+        },
+        {
+            "weight_name": "FLUX-dev-lora-micro-landscape.safetensors",
+        },
+        {
+            "pipe" :{
+                "fuse_lora": {"lora_scale":0.7}
+            }
+        },
+        {
+            "parameters" :{
+                "guidance_scale": "3.5",
+                "num_inference_steps": "24",
+                "scale": "0.72",
+            }
+        },
+        {
+            "thumbnail": "https://huggingface.co/Shakker-Labs/FLUX.1-dev-LoRA-Micro-landscape-on-Mobile-Phone/resolve/main/images/c4f5c765bc8d3d396ed13d65666895ab23ada35c78ca6d91bf814613.jpg",
+            "show_in_gallery": True
         }
     ],
     "Borcherding/FLUX.1-dev-LoRA-FractalLand-v0.1": [

@@ -391,6 +468,9 @@ LORA_DETAILS = {
         {
             "trigger_words": "fractalLand ",
         },
+        {
+            "title": "fractalLand",
+        },
         {
             "parameters" :{
                 "max_sequence_length": "512",

@@ -399,10 +479,134 @@ LORA_DETAILS = {
             }
         },
         {
-            "thumbnail": ""
+            "thumbnail": "https://huggingface.co/Borcherding/FLUX.1-dev-LoRA-FractalLand-v0.1/resolve/main/images/example_e2zoqwftv.png",
+            "show_in_gallery": True
+        }
+    ],
+    "strangerzonehf/Flux-Cute-3D-Kawaii-LoRA": [
+        {
+            "weight_name": "Cute-3d-Kawaii.safetensors",
+        },
+        {
+            "notes": "Uses Cute 3d Kawaii at the start of the prompts "
+        },
+        {
+            "trigger_words": "Cute 3d Kawaii ",
+        },
+        {
+            "title": "Cute 3D Kawaii",
+        },
+        {
+            "parameters" :{
+                "height": "1024"
+            }
+        },
+        {
+            "thumbnail": "https://huggingface.co/strangerzonehf/Flux-Cute-3D-Kawaii-LoRA/resolve/main/images/CK3.png",
+            "show_in_gallery": True
+        }
+    ],
+    "SebastianBodza/flux_cute3D": [
+        {
+            "notes": "Uses NEOCUTE3D at the start of the prompts "
+        },
+        {
+            "trigger_words": "NEOCUTE3D ",
+        },
+        {
+            "title": "Cute 3D",
+        },
+        {
+            "thumbnail": "https://huggingface.co/SebastianBodza/flux_cute3D/resolve/main/images/astronaut.webp",
+            "show_in_gallery": True
+        }
+    ],
+    "gokaygokay/Flux-Seamless-Texture-LoRA": [
+        {
+            "notes": "Uses smlstxtr at the start of the prompts "
+        },
+        {
+            "weight_name": "seamless_texture.safetensors",
+        },
+        {
+            "trigger_words": "smlstxtr ",
+        },
+        {
+            "title": "Seamless Texture",
+        },
+        {
+            "thumbnail": "https://huggingface.co/gokaygokay/Flux-Seamless-Texture-LoRA/resolve/main/images/image3.jpg",
+            "show_in_gallery": True
+        }
+    ],
+    "gokaygokay/Flux-Game-Assets-LoRA-v2": [
+        {
+            "notes": "Uses wbgmsst, white background at the start of the prompts "
+        },
+        {
+            "trigger_words": "wbgmsst, white background ",
+        },
+        {
+            "title": "Game Assets",
+        },
+        {
+            "thumbnail": "https://huggingface.co/gokaygokay/Flux-Game-Assets-LoRA-v2/resolve/main/images/example_y2bqpuphc.png",
+            "show_in_gallery": True
         }
     ]
 }
+
+def sort_loras(sortby):
+    loras = []
+    for key, details in LORA_DETAILS.items():
+        lora_info = {
+            "image": "",
+            "title": "",
+            "repo": key,
+            "weights": "",
+            "trigger_word": "",
+            "notes": ""
+        }
+        for item in details:
+            if "thumbnail" in item:
+                lora_info["image"] = item.get("thumbnail", "")
+            if "weight_name" in item:
+                lora_info["weights"] = item.get("weight_name", "")
+            if "trigger_words" in item:
+                lora_info["trigger_word"] = item.get("trigger_words", "")
+            if "notes" in item:
+                lora_info["notes"] = item.get("notes", "")
+            if "title" in item:
+                lora_info["title"] = item.get("title", "")
+        # Default title to key if not provided
+        if not lora_info["title"]:
+            lora_info["title"] = key
+        # Only add to gallery if there is an image set or any item is flagged "show_in_gallery"
+        if lora_info["image"] or any(item.get("show_in_gallery", False) for item in details):
+            loras.append(lora_info)
+    # Add models from the MODELS list with a default thumbnail
+    for model in MODELS:
+        loras.append({
+            "image": f"images/prerendered/th/"+ model.split("/")[-1]+".png",
+            "title": model.split("/")[-1],
+            "repo": model,
+            "weights": "",
+            "trigger_word": "",
+            "notes": ""
+        })
+    # Sort the loras list by the title attribute
+    loras = sorted(loras, key=lambda x: x[sortby])
+    loras.append({
+        "image": f"images/images/Bee-test-2.png",
+        "title": "Manual Entry",
+        "repo": "Manual Entry",
+        "weights": "",
+        "trigger_word": "",
+        "notes": ""
+    })
+    return loras
+
+LORAS = sort_loras("title")
 # Read the contents of the LUT folder, output to a list
 lut_folder = "./LUT"
 lut_files = [os.path.join(lut_folder, f).replace("\\", "/") for f in os.listdir(lut_folder) if f.endswith(".cube")]
utils/hex_grid.py
CHANGED

@@ -15,6 +15,7 @@ from utils.color_utils import update_color_opacity, parse_hex_color, draw_text_w
 import random # For random text options
 import utils.constants as constants # Import constants
 import ast
+from utils.misc import number_to_letter
 
 def calculate_font_size(hex_size, padding=0.6, size_ceil=20, min_font_size=8):
     """

@@ -277,6 +278,10 @@ def generate_hexagon_grid_with_text(hex_size, border_size, input_image=None, ima
                 text = f"{col},{row}"
             elif add_hex_text_option == "Sequential Numbers":
                 text = f"{hex_index}"
+            elif add_hex_text_option == "Column Letter, Row Number":
+                text = f"{number_to_letter(col)},{row}"
+            elif add_hex_text_option == "Column Number, Row Letter":
+                text = f"{col},{number_to_letter(row)}"
             elif text_list:
                 text = text_list[hex_index % len(text_list)]
             else:
utils/image_utils.py
CHANGED

@@ -102,33 +102,34 @@ def build_prerendered_images(images_list):
 # print(filtered_maps)
 def build_prerendered_images_by_quality(quality_limit, key='file'):
     """
-    Retrieve and sort file paths from PRE_RENDERED_MAPS_JSON_LEVELS where quality is
+    Retrieve and sort file paths from PRE_RENDERED_MAPS_JSON_LEVELS where quality is <= quality_limit.
+    Sorts by quality and case-insensitive alphanumeric key.
 
     Args:
-        quality_limit (int):
-        key (str):
+        quality_limit (int): Maximum quality threshold
+        key (str): Key to extract file path from map info (default: 'file')
 
     Returns:
+        tuple: (sorted file paths list, list of corresponding map names)
     """
+    # Pre-compute lowercase alphanumeric key once per item
+    def get_sort_key(item):
+        name, info = item
+        return (info['quality'], ''.join(c for c in name.lower() if c.isalnum()))
+
+    # Single pass: sort and filter
-    images_list = [
-        map_info[key].replace("\\", "/")
-        for _, map_info in sorted_maps
-        if map_info['quality'] <= quality_limit
+    filtered_maps = [
+        (info[key].replace("\\", "/"), name)
+        for name, info in sorted(PRE_RENDERED_MAPS_JSON_LEVELS.items(), key=get_sort_key)
+        if info['quality'] <= quality_limit
     ]
 
+    # Split into separate lists efficiently
+    if filtered_maps:
+        #file_paths, map_names = zip(*filtered_maps)
+        #return (build_prerendered_images(file_paths), list(map_names))
+        return [(open_image(file_path), map_name) for file_path, map_name in filtered_maps]
+    return (None,"")
 
 
 def build_encoded_images(images_list):

@@ -615,7 +616,8 @@ def apply_lut_to_image_path(lut_filename: str, image_path: str) -> tuple[Image,
             img, new_image_path = convert_to_rgba_png(image_path)
         else:
             img, new_image_path = convert_to_rgba_png(image_path)
+            if image_path != new_image_path:
+                delete_image(image_path)
     else:
         new_image_path = image_path
     if lut_filename is not None:

@@ -623,7 +625,7 @@ def apply_lut_to_image_path(lut_filename: str, image_path: str) -> tuple[Image,
             img = apply_lut(img, lut_filename)
         except Exception as e:
             print(f"BAD LUT: Error applying LUT {str(e)}.")
-    img.save(new_image_path, format='PNG')
+    img.save(new_image_path.lower(), format='PNG')
     return img, str(new_image_path)
 
 ############################################# RGBA ###########################################################

@@ -838,3 +840,40 @@ def print_json():
     for key, value in PRE_RENDERED_MAPS_JSON_LEVELS.items():
         print(f" '{key}': {{'file': '{value['file']}', 'thumbnail': '{value['thumbnail']}', 'quality': {value['quality']}}},")
     print("}")
+
+def calculate_optimal_fill_dimensions(image: Image.Image):
+    # Extract the original dimensions
+    original_width, original_height = image.size
+
+    # Set constants
+    MIN_ASPECT_RATIO = 9 / 16
+    MAX_ASPECT_RATIO = 16 / 9
+    FIXED_DIMENSION = 1024
+
+    # Calculate the aspect ratio of the original image
+    original_aspect_ratio = original_width / original_height
+
+    # Determine which dimension to fix
+    if original_aspect_ratio > 1:  # Wider than tall
+        width = FIXED_DIMENSION
+        height = round(FIXED_DIMENSION / original_aspect_ratio)
+    else:  # Taller than wide
+        height = FIXED_DIMENSION
+        width = round(FIXED_DIMENSION * original_aspect_ratio)
+
+    # Ensure dimensions are multiples of 8
+    width = (width // 8) * 8
+    height = (height // 8) * 8
+
+    # Enforce aspect ratio limits
+    calculated_aspect_ratio = width / height
+    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
+        width = (height * MAX_ASPECT_RATIO // 8) * 8
+    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
+        height = (width / MIN_ASPECT_RATIO // 8) * 8
+
+    # Ensure width and height remain above the minimum dimensions
+    width = max(width, 576) if width == FIXED_DIMENSION else width
+    height = max(height, 576) if height == FIXED_DIMENSION else height
+
+    return width, height
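
calculate_optimal_fill_dimensions pins the longer side to 1024, rounds both sides down to multiples of 8, and clamps the result to the 9:16–16:9 range, presumably to feed the new FLUX.1-Fill-dev path. A worked example:

from PIL import Image
from utils.image_utils import calculate_optimal_fill_dimensions

img = Image.new("RGB", (1920, 1080))               # 16:9 source
width, height = calculate_optimal_fill_dimensions(img)
print(width, height)                               # 1024 576 -> aspect preserved, both multiples of 8

One thing to watch: the two clamping branches use true division before //, so extreme aspect ratios come back as floats and may need an explicit int() before being handed to a pipeline.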
utils/lora_details.py
CHANGED

@@ -1,16 +1,14 @@
 # utils/lora_details.py
 
 import gradio as gr
-from utils.constants import LORA_DETAILS
-def upd_prompt_notes_by_index(lora_index):
-    """
-    Updates the prompt_notes_label with the notes from LORAS based on index.
+from utils.constants import LORA_DETAILS, MODELS, LORAS
 
+def get_lora_models():
+    return [(item["image"], item["title"]) for item in LORAS]
 
+def upd_prompt_notes_by_index(lora_index):
+    """
+    Updates the prompt notes label based on the selected LoRA model.
     """
     try:
         if LORAS[lora_index]:

@@ -20,7 +18,10 @@ def upd_prompt_notes_by_index(lora_index):
             trigger_position = LORAS[lora_index].get('trigger_position', "")
             notes = f"{trigger_position} '{trigger_word}' in prompt"
     except IndexError:
-        notes =
+        notes = (
+            "Enter prompt description of your image. \n"
+            "Using models without LoRA may take 30 minutes."
+        )
     return gr.update(value=notes)
 
 def get_trigger_words_by_index(lora_index):

@@ -138,3 +139,23 @@ def split_prompt_precisely(prompt, max_tokens=77, model="gpt-3.5-turbo"):
     remaining_prompt = encoding.decode(remaining_tokens)
 
     return split_prompt, remaining_prompt
+
+def is_lora_loaded(pipe, adapter_name):
+    """
+    Check if a LoRA weight with the given adapter name is already loaded in the pipeline.
+
+    Args:
+        pipe (FluxPipeline): The pipeline to check.
+        adapter_name (str): The adapter name of the LoRA weight.
+
+    Returns:
+        bool: True if the LoRA weight is loaded, False otherwise.
+    """
+    adapter_list = pipe.get_list_adapters()
+    for component_adapters in adapter_list.values():
+        if adapter_name in component_adapters:
+            return True
+
+    if adapter_name in getattr(pipe, "peft_config", {}):
+        return True
+    return False
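
is_lora_loaded exists to keep the app from re-loading an adapter that is already attached to the pipeline. A hedged usage sketch (the repo and adapter names below are illustrative):

import torch
from diffusers import FluxPipeline
from utils.lora_details import is_lora_loaded

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

adapter_name = "logo"  # illustrative adapter name
if not is_lora_loaded(pipe, adapter_name):
    # Only hit the Hub / disk the first time this adapter is requested.
    pipe.load_lora_weights("Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design", adapter_name=adapter_name)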
utils/misc.py
CHANGED

@@ -108,6 +108,13 @@ def convert_ratio_to_dimensions(ratio, height=512, rotate90=False):
         adjusted_width, adjusted_height = adjusted_height, adjusted_width
     return adjusted_width, adjusted_height
 
+def update_dimensions_on_ratio(aspect_ratio_str, base_height):
+    # Convert aspect_ratio from a string in format "W:H" into numbers and compute new dimensions.
+    width_ratio, height_ratio = map(int, aspect_ratio_str.split(":"))
+    aspect_ratio = width_ratio / height_ratio
+    new_width, new_height = convert_ratio_to_dimensions(aspect_ratio, base_height)
+    return new_width, new_height
+
 def install_torch():
     print("\nInstalling PyTorch with CUDA support...")
     # Define the package and index URL

@@ -189,4 +196,14 @@ def get_seed(randomize_seed: bool, seed: int) -> int:
     """
     Get the random seed.
     """
-    return np.random.randint(0, MAX_SEED) if randomize_seed else seed
+    return np.random.randint(0, MAX_SEED) if randomize_seed else seed
+
+def number_to_letter(n: int, upper_case: bool = True):
+    result = ''
+    a_char = 97
+    if upper_case:
+        a_char -= 32
+    while abs(n) > 0:
+        n, remainder = divmod(abs(n) - 1, 26)
+        result = chr(a_char + remainder) + result
+    return result