Update app.py

app.py CHANGED
@@ -52,7 +52,6 @@ def download_lora(url, lora_dir):
                 bar.update(len(data))
                 file.write(data)
 
-        # Verify the downloaded size
         if total_size != 0 and bar.n != total_size:
             print(f"❌ Error while downloading LoRA: file size mismatch. The incomplete file was removed.")
             os.remove(lora_path)
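For context, the surrounding download_lora helper streams the file to disk while advancing a tqdm bar, then compares the byte count against the Content-Length header. A minimal, self-contained sketch of that pattern, assuming requests and tqdm are in use (the helper name and arguments here are illustrative, not the Space's actual signature):

import os
import requests
from tqdm import tqdm

def download_file(url, dest_path, chunk_size=8192):
    """Stream `url` to `dest_path` with a progress bar; return the path or None."""
    response = requests.get(url, stream=True, timeout=60)
    response.raise_for_status()
    total_size = int(response.headers.get("content-length", 0))

    with open(dest_path, "wb") as file, tqdm(total=total_size, unit="B", unit_scale=True) as bar:
        for data in response.iter_content(chunk_size=chunk_size):
            bar.update(len(data))
            file.write(data)

        # Verify the downloaded size: if the server reported a length and fewer
        # bytes arrived, delete the incomplete file instead of returning it.
        if total_size != 0 and bar.n != total_size:
            os.remove(dest_path)
            return None
    return dest_path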
@@ -63,7 +62,7 @@ def download_lora(url, lora_dir):
 
     except Exception as e:
         print(f"❌ Failed to download LoRA: {e}")
-        if os.path.exists(lora_path):
+        if os.path.exists(lora_path):
             os.remove(lora_path)
         return None
     else:
@@ -78,13 +77,11 @@ def generate_image(pipe, prompt, seed=42, randomize_seed=False, width=768, heigh
     """
     MAX_SEED = np.iinfo(np.int32).max
 
-    # Decide the final seed
     if randomize_seed:
         used_seed = random.randint(0, MAX_SEED)
     else:
         used_seed = int(seed)
 
-    # Create the generator (on the device the pipe is on)
     device = pipe.device if hasattr(pipe, "device") else ("cuda" if torch.cuda.is_available() else "cpu")
     generator = torch.Generator(device=device).manual_seed(used_seed)
 
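The seed logic above boils down to: draw a random seed when randomize_seed is set, otherwise use the caller's value, and seed a torch.Generator on the pipeline's device. A standalone sketch of just that step (the function name is illustrative):

import random
import numpy as np
import torch

def make_generator(seed=42, randomize_seed=False, device="cpu"):
    """Resolve the final seed and return a seeded torch.Generator plus the seed used."""
    MAX_SEED = np.iinfo(np.int32).max
    used_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    generator = torch.Generator(device=device).manual_seed(used_seed)
    return generator, used_seed

# Example: a fixed seed gives reproducible sampling on the same device.
gen, used = make_generator(seed=42, device="cpu")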
@@ -120,11 +117,9 @@ def main():
     dtype = torch.bfloat16
     device = "cuda" if torch.cuda.is_available() else "cpu"
 
-    # Load the VAE, then the pipeline
     good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", subfolder="vae", torch_dtype=dtype)
     pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", torch_dtype=dtype, vae=good_vae).to(device)
 
-    # --- New: LoRA handling ---
     if args.lora:
         print("-" * 20)
         print("🔧 Processing LoRA...")
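main() loads the FLUX.1-Krea-dev VAE and pipeline before the LoRA branch runs. How the downloaded LoRA is attached is outside this hunk; a hedged sketch of one common diffusers approach, assuming the download yields a local .safetensors file (lora_file_path is a placeholder):

import torch
from diffusers import AutoencoderKL, DiffusionPipeline

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the VAE separately, then hand it to the pipeline, as in the script above.
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-Krea-dev", subfolder="vae", torch_dtype=dtype
)
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Krea-dev", torch_dtype=dtype, vae=good_vae
).to(device)

# One way to apply a downloaded LoRA file (path is hypothetical).
lora_file_path = "loras/example_style.safetensors"
pipe.load_lora_weights(lora_file_path)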
@@ -140,7 +135,6 @@ def main():
         else:
             print("⚠️ Could not obtain the LoRA file; continuing without a LoRA.")
         print("-" * 20)
-    # --- End of LoRA handling ---
 
     if device == "cuda":
         torch.cuda.empty_cache()
@@ -148,9 +142,8 @@ def main():
     print(f"✅ Model loaded; using device: {device}")
     print(f"🎨 Generating an image for prompt: '{args.prompt}'")
 
-    # Seed handling: 0 means random; otherwise a fixed value (default 42)
     randomize = (args.seed == 0)
-    seed_value = args.seed if not randomize else 42
+    seed_value = args.seed if not randomize else 42
 
     generated_image, used_seed = generate_image(
         pipe=pipe,
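The convention here is that --seed 0 means "pick a random seed", while any other value is used verbatim; seed_value only matters in the non-random case. A small illustration of that sentinel, assuming an argparse flag shaped like the one the script appears to use:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=42,
                    help="Fixed seed for reproducibility; pass 0 to randomize")
args = parser.parse_args(["--seed", "0"])  # simulate `--seed 0` for this example

randomize = (args.seed == 0)                     # 0 is the "randomize" sentinel
seed_value = args.seed if not randomize else 42  # placeholder, ignored when randomizing
print(randomize, seed_value)                     # -> True 42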
@@ -163,15 +156,11 @@ def main():
         guidance_scale=args.guidance
     )
 
-    #
+    # --- Changed: ensure the output path is fixed ---
     output_dir = "output"
     os.makedirs(output_dir, exist_ok=True)
-
-    #
-    prompt_slug = "".join(filter(str.isalnum, args.prompt))[:50]
-    output_filename = f"output_{used_seed}_{prompt_slug}.png"
-    output_path = os.path.abspath(os.path.join(output_dir, output_filename))
-
+    output_path = os.path.abspath(os.path.join(output_dir, "output.png"))
+    # --- End of change ---
 
     print(f"💾 Saving image to: {output_path}")
     generated_image.save(output_path)
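The substantive change in this commit is the output path: instead of deriving a filename from the seed and a slug of the prompt, the script now always writes output/output.png, which gives downstream code (for example, whatever serves or polls for the result) a predictable location. The new behavior in isolation:

import os

output_dir = "output"
os.makedirs(output_dir, exist_ok=True)

# Fixed, predictable filename; each run overwrites the previous image
# rather than accumulating per-seed files.
output_path = os.path.abspath(os.path.join(output_dir, "output.png"))
# generated_image.save(output_path)  # PIL's Image.save writes the PNG here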
@@ -179,9 +168,8 @@ def main():
     print(f"🎉 Done! File saved at: {output_path}")
     print(f"🔢 Seed used: {used_seed} (seed param was {'0(random)' if args.seed==0 else args.seed})")
     print(f"🖼️ Size: {args.width}x{args.height}, steps: {args.steps}, guidance: {args.guidance}")
-    if args.lora:
-        print(f"🎨 LoRA used: {os.path.basename(
-
+    if args.lora and lora_file_path:
+        print(f"🎨 LoRA used: {os.path.basename(lora_file_path)}")
 
 if __name__ == "__main__":
     main()