MegaTronX committed on
Commit eae62a9 · verified · 1 Parent(s): e3cdce3

Update app.py

Files changed (1)
  1. app.py +25 -92
app.py CHANGED
@@ -1,103 +1,36 @@
-import spaces
 import gradio as gr
+import spaces
+from diffusers import DiffusionPipeline
 import torch
-from diffusers import FluxPipeline, FluxTransformer2DModel, FlowMatchEulerDiscreteScheduler
-from huggingface_hub import hf_hub_download
-from PIL import Image
-import numpy as np
-import random
-
-
-# Only initialize GPU after spaces import
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# Constants
-#BASE_MODEL = "black-forest-labs/FLUX.1-dev"
-#LORA_MODEL = "MegaTronX/SuicideGirl-FLUX" # Replace with your LoRA path
-MAX_SEED = np.iinfo(np.int32).max
-
-

-pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev")
 pipe.load_lora_weights("MegaTronX/SuicideGirl-FLUX", weight_name="SuicideGirls.safetensors")
-pipe.fuse_lora(lora_scale=0.8)
-pipe.to("cuda")
-
-
-# Initialize model and scheduler
-'''if torch.cuda.is_available():
-    transformer = FluxTransformer2DModel.from_single_file(
-        "https://huggingface.co/MegaTronX/SuicideGirl-FLUX/blob/main/SuicideGirls.safetensors",
-        torch_dtype=torch.bfloat16
-    )
-    pipe = FluxPipeline.from_pretrained(
-        BASE_MODEL,
-        transformer=transformer,
-        torch_dtype=torch.bfloat16
-    )
-    pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
-        pipe.scheduler.config, use_beta_sigmas=True
-    )
-    pipe.to("cuda")
-
-    # Load and apply LoRA weights
-    pipe.load_lora_weights(LORA_MODEL)
-'''
-

 @spaces.GPU
-def generate_image(
-    prompt,
-    width=768,
-    height=1024,
-    guidance_scale=3.5,
-    num_inference_steps=24,
-    seed=-1,
-    num_images=1,
-    progress=gr.Progress(track_tqdm=True)
-):
-    if seed == -1:
-        seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator().manual_seed(seed)
-
-    images = pipe(
+def generate_image(prompt, num_inference_steps=25, guidance_scale=7.5, seed=None):
+    """Generates an image using the FLUX.1-dev LoRA model."""
+    generator = torch.Generator("cuda").manual_seed(seed) if seed else None
+
+    image = pipe(
         prompt,
-        width=width,
-        height=height,
-        guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
+        guidance_scale=guidance_scale,
         generator=generator,
-        output_type="pil",
-        max_sequence_length=512,
-        num_images_per_prompt=num_images,
-    ).images
-
-    return images, seed
+    ).images[0]
+    return image

 # Gradio Interface
-with gr.Blocks() as demo:
-    gr.HTML("<h1><center>Flux LoRA Image Generator</center></h1>")
-
-    with gr.Group():
-        prompt = gr.Textbox(label='Enter Your Prompt', lines=3)
-        generate_button = gr.Button("Generate", variant='primary')
-
-    with gr.Row():
-        image_output = gr.Gallery(label="Generated Images", columns=2, preview=True)
-        seed_output = gr.Number(label="Seed Used")
-
-    with gr.Accordion("Advanced Options", open=False):
-        width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=768)
-        height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=1024)
-        guidance_scale = gr.Slider(label="Guidance Scale", minimum=0, maximum=50, step=0.1, value=3.5)
-        num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=24)
-        seed = gr.Slider(label="Seed (-1 for random)", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
-        num_images = gr.Slider(label="Number of Images", minimum=1, maximum=4, step=1, value=1)
-
-    generate_button.click(
-        fn=generate_image,
-        inputs=[prompt, width, height, guidance_scale, num_inference_steps, seed, num_images],
-        outputs=[image_output, seed_output]
-    )
-
-demo.launch()
+iface = gr.Interface(
+    fn=generate_image,
+    inputs=[
+        gr.Textbox(lines=3, label="Prompt"),
+        gr.Slider(minimum=10, maximum=100, value=25, label="Inference Steps"),
+        gr.Slider(minimum=1, maximum=15, value=7.5, label="Guidance Scale"),
+        gr.Number(label="Seed (Optional)"),
+    ],
+    outputs=gr.Image(label="Generated Image"),
+    title="FLUX.1-dev LoRA Demo",
+    description="A demo of your FLUX.1-dev LoRA model.",
+)
+
+iface.launch()
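
The new script loads the pipeline in default precision, never moves it to the GPU, and passes the raw `gr.Number` value (a float, or `None` when empty) straight to `torch.Generator`. Below is a minimal sketch of how the simplified setup could keep the bfloat16/CUDA placement and random-seed fallback from the previous revision; the `int(seed)` cast and the exact arrangement are illustrative assumptions, not part of this commit.

```python
# Illustrative sketch only (not part of this commit): the new generate_image()
# signature combined with the bfloat16/CUDA placement and random-seed fallback
# that the previous revision of app.py used. int(seed) is an assumption added
# because gr.Number returns a float (or None).
import random

import numpy as np
import spaces
import torch
from diffusers import DiffusionPipeline

MAX_SEED = np.iinfo(np.int32).max

pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.load_lora_weights("MegaTronX/SuicideGirl-FLUX", weight_name="SuicideGirls.safetensors")
pipe.to("cuda")  # the previous revision moved the pipeline to the GPU; the new one does not


@spaces.GPU
def generate_image(prompt, num_inference_steps=25, guidance_scale=7.5, seed=None):
    """Generate one image, falling back to a random seed when none is given."""
    seed = int(seed) if seed is not None else random.randint(0, MAX_SEED)
    generator = torch.Generator("cuda").manual_seed(seed)
    image = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    return image
```

The `gr.Interface` wiring from the commit plugs in unchanged on top of this; only the pipeline setup and `generate_image` body differ.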