import os
import requests
import subprocess
import gradio as gr
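
# Gradio front-end: downloads a prebuilt Stable Diffusion CLI binary and a Realistic Vision
# checkpoint from the Hugging Face Hub, then shells out to the binary for txt2img / img2img.
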
# Hugging Face token from the environment
hf_token = os.getenv("HF_TOKEN")

# URLs to download
app_url = "https://huggingface.co/datasets/ArrcttacsrjksX/Deffusion/resolve/main/RunModelAppp/App/sdRundeffusiononhuggingfacemaster-ac54e00"
model_url = "https://huggingface.co/datasets/ArrcttacsrjksX/Deffusion/resolve/main/Model/realisticVisionV60B1_v51HyperVAE.safetensors"

# Local paths for the downloaded files
app_path = "sdRundeffusiononhuggingfacemaster-ac54e00"
model_path = "realisticVisionV60B1_v51HyperVAE.safetensors"
# Download a file from the Hugging Face Hub, streaming it to disk
def download_file(url, output_path, token):
    # Only send an Authorization header when a token is available
    headers = {"Authorization": f"Bearer {token}"} if token else {}
    response = requests.get(url, headers=headers, stream=True)
    response.raise_for_status()  # Raise on HTTP errors
    with open(output_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    print(f"Downloaded: {output_path}")
# Download the binary and the model if they are not already present
if not os.path.exists(app_path):
    download_file(app_url, app_path, hf_token)
    subprocess.run(["chmod", "+x", app_path], check=True)  # Make the binary executable

if not os.path.exists(model_path):
    download_file(model_url, model_path, hf_token)
# Build and run the image-generation command for the selected mode
def run_command(prompt, mode, height, width, steps, seed, init_image=None, threads=-1,
                weight_type="f32", negative_prompt="", cfg_scale=7.0, strength=0.75,
                style_ratio=0.2, control_strength=0.9, sampling_method="euler_a",
                batch_count=1, schedule="discrete", clip_skip=-1, vae_tiling=False,
                vae_on_cpu=False, clip_on_cpu=False, control_net_cpu=False, canny=False,
                color=False, verbose=False, rng="cuda"):
    try:
        # Save the input image to disk if one was provided
        init_image_path = None
        if init_image is not None:
            init_image_path = "input_image.png"
            init_image.save(init_image_path)
        # Build the command line
        command = [
            f"./{app_path}",
            "-M", mode,
            "-m", model_path,
            "-p", prompt,
            "-n", negative_prompt,  # negative prompt (previously accepted but never passed)
            "-H", str(height),
            "-W", str(width),
            "--steps", str(steps),
            "-s", str(seed),
            "-t", str(threads),
            "--type", weight_type,
            "--cfg-scale", str(cfg_scale),
            "--strength", str(strength),
            "--style-ratio", str(style_ratio),
            "--control-strength", str(control_strength),
            "--sampling-method", sampling_method,
            "--batch-count", str(batch_count),
            "--schedule", schedule,
            "--clip-skip", str(clip_skip),
            "--vae-tiling" if vae_tiling else None,
            "--vae-on-cpu" if vae_on_cpu else None,
            "--clip-on-cpu" if clip_on_cpu else None,
            "--control-net-cpu" if control_net_cpu else None,
            "--canny" if canny else None,
            "--color" if color else None,
            "-v" if verbose else None,
            "--rng", rng,
        ]
        # Drop the None placeholders left by disabled flags
        command = [arg for arg in command if arg is not None]
        # Add the input image when running in img2img mode
        if mode == "img2img" and init_image_path:
            command.extend(["-i", init_image_path])
        # Run the command and stream its log output
        process = subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
        )
        logs = []
        for line in process.stdout:
            logs.append(line.strip())  # Collect log lines for the UI
            print(line, end="")        # Echo them to the console
        process.wait()  # Wait for the process to finish

        # Check the result and return the output image plus the logs
        if process.returncode == 0:
            output_path = "./output.png"  # Default output path written by the binary
            return output_path if os.path.exists(output_path) else None, "\n".join(logs)
        else:
            error_log = process.stderr.read()  # Read anything written to stderr
            logs.append(error_log)
            return None, "\n".join(logs)
    except Exception as e:
        return None, str(e)
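
# Example of a direct call (hypothetical prompt values), bypassing the UI:
#   image_path, logs = run_command("a lighthouse at sunset", "txt2img", 512, 512, 20, 42)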
# Gradio interface
def toggle_image_input(mode):
    """Show or hide the Drop Image box depending on the selected mode."""
    return gr.update(visible=(mode == "img2img"))
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 🌟 **Stable Diffusion Interface**
        Generate stunning images from text or modify existing images with AI-powered tools.
        """
    )
    # Lay out the interface
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="🎨 Prompt", placeholder="Enter your creative idea here...", lines=2
            )
            mode = gr.Radio(
                choices=["txt2img", "img2img"], value="txt2img", label="Mode", interactive=True
            )
            init_image = gr.Image(
                label="Drop Image (for img2img mode)", type="pil", visible=False
            )
            mode.change(toggle_image_input, inputs=mode, outputs=init_image)
            negative_prompt = gr.Textbox(
                label="Negative Prompt", placeholder="Anything to avoid in the image", lines=2
            )
            threads = gr.Slider(-1, 64, value=-1, step=1, label="Threads", interactive=True)
            weight_type = gr.Dropdown(choices=["f32", "f16", "q4_0", "q4_1", "q5_0", "q5_1", "q8_0", "q2_k", "q3_k", "q4_k"], value="f32", label="Weight Type")
            cfg_scale = gr.Slider(0, 20, value=7.0, step=0.1, label="CFG Scale", interactive=True)
            strength = gr.Slider(0, 1, value=0.75, step=0.01, label="Strength", interactive=True)
            style_ratio = gr.Slider(0, 1, value=0.2, step=0.01, label="Style Ratio", interactive=True)
            control_strength = gr.Slider(0, 1, value=0.9, step=0.01, label="Control Strength", interactive=True)
            sampling_method = gr.Dropdown(choices=["euler", "euler_a", "heun", "dpm2", "dpm++2s_a", "dpm++2m", "dpm++2mv2", "ipndm", "ipndm_v", "lcm"], value="euler_a", label="Sampling Method")
            batch_count = gr.Slider(1, 10, value=1, step=1, label="Batch Count", interactive=True)
            schedule = gr.Dropdown(choices=["discrete", "karras", "exponential", "ays", "gits"], value="discrete", label="Denoiser Schedule")
            clip_skip = gr.Slider(-1, 2, value=-1, step=1, label="Clip Skip", interactive=True)
            vae_tiling = gr.Checkbox(label="VAE Tiling", interactive=True)
            vae_on_cpu = gr.Checkbox(label="VAE on CPU", interactive=True)
            clip_on_cpu = gr.Checkbox(label="CLIP on CPU", interactive=True)
            control_net_cpu = gr.Checkbox(label="Control Net on CPU", interactive=True)
            canny = gr.Checkbox(label="Apply Canny Preprocessor", interactive=True)
            color = gr.Checkbox(label="Color Logs", interactive=True)
            verbose = gr.Checkbox(label="Verbose Output", interactive=True)
            rng = gr.Radio(choices=["std_default", "cuda"], value="cuda", label="Random Number Generator", interactive=True)
        with gr.Column():
            height = gr.Slider(
                128, 1024, value=512, step=64, label="Image Height (px)", interactive=True
            )
            width = gr.Slider(
                128, 1024, value=512, step=64, label="Image Width (px)", interactive=True
            )
            steps = gr.Slider(
                1, 100, value=20, step=1, label="Sampling Steps", interactive=True
            )
            seed = gr.Slider(
                1, 10000, value=42, step=1, label="Seed", interactive=True
            )
            generate_btn = gr.Button("Run")
            output_image = gr.Image(label="Generated Image")
            logs_output = gr.Textbox(label="Logs", interactive=False, lines=15)
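
    # Wire the Run button: run_command returns (image path or None, log text)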
    generate_btn.click(
        run_command,
        inputs=[
            prompt, mode, height, width, steps, seed, init_image, threads, weight_type, negative_prompt,
            cfg_scale, strength, style_ratio, control_strength, sampling_method, batch_count, schedule,
            clip_skip, vae_tiling, vae_on_cpu, clip_on_cpu, control_net_cpu, canny, color, verbose, rng
        ],
        outputs=[output_image, logs_output],
    )

demo.launch()