# GPU patch: when no NVIDIA driver is present, route torch.cuda-related calls
# to the CPU device instead.
import torch

if not torch.cuda.is_available():
    # Patch torch.cuda.current_device() to return 0, and make torch.device(int)
    # resolve to the "cpu" device instead of a CUDA device.
    torch.cuda.current_device = lambda: 0
    original_torch_device = torch.device

    def patched_torch_device(*args, **kwargs):
        # A single integer argument normally selects a CUDA ordinal; map it to CPU.
        if len(args) == 1 and not kwargs and isinstance(args[0], int):
            return original_torch_device("cpu")
        # Forward every other call form (e.g. torch.device("cuda", 0)) unchanged.
        return original_torch_device(*args, **kwargs)

    torch.device = patched_torch_device
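# Illustrative effect of the patch above (a sketch, not executed at import time):
#   torch.device(0)      -> device("cpu")   # integer CUDA ordinal redirected
#   torch.device("cpu")  -> device("cpu")   # non-integer forms pass through
# Libraries that hard-code an integer device ordinal therefore fall back to
# CPU instead of failing when no NVIDIA driver is installed.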
import comfy.options
comfy.options.enable_args_parsing()
import os
import importlib.util
import folder_paths
import time
from comfy.cli_args import args
from app.logger import setup_logger
setup_logger(log_level=args.verbose)
def execute_prestartup_script():
    def execute_script(script_path):
        module_name = os.path.splitext(script_path)[0]
        try:
            spec = importlib.util.spec_from_file_location(module_name, script_path)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            return True
        except Exception as e:
            print(f"Failed to execute startup-script: {script_path} / {e}")
        return False

    if args.disable_all_custom_nodes:
        return

    node_paths = folder_paths.get_folder_paths("custom_nodes")
    node_prestartup_times = []  # Accumulated (elapsed, path, success) for every node.
    for custom_node_path in node_paths:
        possible_modules = os.listdir(custom_node_path)

        for possible_module in possible_modules:
            module_path = os.path.join(custom_node_path, possible_module)
            if os.path.isfile(module_path) or module_path.endswith(".disabled") or possible_module == "__pycache__":
                continue

            script_path = os.path.join(module_path, "prestartup_script.py")
            if os.path.exists(script_path):
                time_before = time.perf_counter()
                success = execute_script(script_path)
                elapsed = time.perf_counter() - time_before
                node_prestartup_times.append((elapsed, module_path, success))
    if node_prestartup_times:
        # If the Rich library is available, print the results as a table.
        try:
            from rich.console import Console
            from rich.table import Table

            console = Console()
            table = Table(title="Prestartup Times for Custom Nodes")
            table.add_column("Time (s)", justify="right")
            table.add_column("Status")
            table.add_column("Custom Node Path")
            for elapsed, module_path, success in sorted(node_prestartup_times, key=lambda x: x[0]):
                status = "[green]Success[/green]" if success else "[red]Failed[/red]"
                table.add_row(f"{elapsed:.1f}", status, module_path)
            console.print(table)
        except ImportError:
            # Fall back to plain print output when Rich is not installed.
            print("\nPrestartup times for custom nodes:")
            for elapsed, module_path, success in sorted(node_prestartup_times, key=lambda x: x[0]):
                import_message = "" if success else " (PRESTARTUP FAILED)"
                print("{:6.1f} seconds{}:".format(elapsed, import_message), module_path)
            print()
execute_prestartup_script()
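# A custom node opts into this hook by shipping a prestartup_script.py at the
# top level of its directory, e.g. (hypothetical path):
#   custom_nodes/my_custom_node/prestartup_script.py
# Such scripts run before the main ComfyUI imports below, so they can patch
# the environment before any models or nodes are loaded.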
# Main code
import asyncio
import itertools
import shutil
import threading
import gc
import logging
import utils.extra_config
if os.name == "nt":
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
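# Triton has no official Windows build, so on Windows xformers logs
# "A matching Triton is not available" on import; the filter above silences
# that one message without hiding other xformers log output.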
if __name__ == "__main__":
    if args.cuda_device is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
        os.environ['HIP_VISIBLE_DEVICES'] = str(args.cuda_device)
        logging.info("Set cuda device to: {}".format(args.cuda_device))

    if args.deterministic:
        if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ:
            os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8"

    import cuda_malloc

if args.windows_standalone_build:
    try:
        import fix_torch
    except Exception:
        pass
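# Example: pin the process to a single GPU before torch initializes CUDA
# (a sketch; assumes an NVIDIA or ROCm driver is present):
#   python main.py --cuda-device 1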
import comfy.utils
import execution
import server
from server import BinaryEventTypes
import nodes
import comfy.model_management
# Log a warning when no NVIDIA GPU driver is available.
if not torch.cuda.is_available():
    logging.warning("No NVIDIA GPU driver found. Running in CPU mode. Performance may be degraded.")
def cuda_malloc_warning():
    device = comfy.model_management.get_torch_device()
    device_name = comfy.model_management.get_torch_device_name(device)
    warning_needed = False
    if "cudaMallocAsync" in device_name:
        for b in cuda_malloc.blacklist:
            if b in device_name:
                warning_needed = True
    if warning_needed:
        logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get 'CUDA error' please run ComfyUI with: --disable-cuda-malloc\n")
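# cuda_malloc.blacklist holds GPU name fragments known to misbehave with
# cudaMallocAsync; users hitting 'CUDA error' on such cards can start with:
#   python main.py --disable-cuda-malloc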
def prompt_worker(q, server):
    e = execution.PromptExecutor(server, lru_size=args.cache_lru)
    last_gc_collect = 0
    need_gc = False
    gc_collect_interval = 10.0

    while True:
        timeout = 1000.0
        if need_gc:
            timeout = max(gc_collect_interval - (time.perf_counter() - last_gc_collect), 0.0)

        queue_item = q.get(timeout=timeout)
        if queue_item is not None:
            item, item_id = queue_item
            execution_start_time = time.perf_counter()
            prompt_id = item[1]
            server.last_prompt_id = prompt_id

            e.execute(item[2], prompt_id, item[3], item[4])
            need_gc = True
            q.task_done(item_id,
                        e.history_result,
                        status=execution.PromptQueue.ExecutionStatus(
                            status_str='success' if e.success else 'error',
                            completed=e.success,
                            messages=e.status_messages))
            if server.client_id is not None:
                server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id)

            current_time = time.perf_counter()
            execution_time = current_time - execution_start_time
            logging.info("Prompt executed in {:.2f} seconds".format(execution_time))

        flags = q.get_flags()
        free_memory = flags.get("free_memory", False)

        if flags.get("unload_models", free_memory):
            comfy.model_management.unload_all_models()
            need_gc = True
            last_gc_collect = 0

        if free_memory:
            e.reset()
            need_gc = True
            last_gc_collect = 0

        if need_gc:
            current_time = time.perf_counter()
            if (current_time - last_gc_collect) > gc_collect_interval:
                comfy.model_management.cleanup_models()
                gc.collect()
                comfy.model_management.soft_empty_cache()
                last_gc_collect = current_time
                need_gc = False
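# The worker drains the prompt queue on a dedicated thread; gc.collect() and
# the soft cache flush are throttled so they run at most once per
# gc_collect_interval (10 s) rather than after every prompt.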
async def run(server, address='', port=8188, verbose=True, call_on_start=None):
    addresses = []
    for addr in address.split(","):
        addresses.append((addr, port))

    await asyncio.gather(server.start_multi_address(addresses, call_on_start), server.publish_loop())
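# `address` may hold several comma-separated bind addresses, all sharing one
# port. For example (a sketch):
#   python main.py --listen 127.0.0.1,::1 --port 8188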
def hijack_progress(server):
    def hook(value, total, preview_image):
        comfy.model_management.throw_exception_if_processing_interrupted()
        progress = {"value": value, "max": total, "prompt_id": server.last_prompt_id, "node": server.last_node_id}

        server.send_sync("progress", progress, server.client_id)
        if preview_image is not None:
            server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)

    comfy.utils.set_progress_bar_global_hook(hook)
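# The hook doubles as the cancellation point: throw_exception_if_processing_interrupted()
# raises mid-execution when the client requests an interrupt, and every progress
# tick (plus any unencoded preview frame) is pushed to the client via send_sync.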
def cleanup_temp():
    temp_dir = folder_paths.get_temp_directory()
    if os.path.exists(temp_dir):
        shutil.rmtree(temp_dir, ignore_errors=True)
if __name__ == "__main__":
    if args.temp_directory:
        temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp")
        logging.info(f"Setting temp directory to: {temp_dir}")
        folder_paths.set_temp_directory(temp_dir)
    cleanup_temp()

    if args.windows_standalone_build:
        try:
            import new_updater
            new_updater.update_windows_updater()
        except Exception:
            pass

    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    server = server.PromptServer(loop)
    q = execution.PromptQueue(server)

    extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
    if os.path.isfile(extra_model_paths_config_path):
        utils.extra_config.load_extra_path_config(extra_model_paths_config_path)

    if args.extra_model_paths_config:
        for config_path in itertools.chain(*args.extra_model_paths_config):
            utils.extra_config.load_extra_path_config(config_path)

    nodes.init_extra_nodes(init_custom_nodes=not args.disable_all_custom_nodes)

    cuda_malloc_warning()

    server.add_routes()
    hijack_progress(server)

    threading.Thread(target=prompt_worker, daemon=True, args=(q, server,)).start()

    if args.output_directory:
        output_dir = os.path.abspath(args.output_directory)
        logging.info(f"Setting output directory to: {output_dir}")
        folder_paths.set_output_directory(output_dir)

    # Register default model storage folders under the output directory.
    folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints"))
    folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip"))
    folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae"))
    folder_paths.add_model_folder_path("diffusion_models", os.path.join(folder_paths.get_output_directory(), "diffusion_models"))
    folder_paths.add_model_folder_path("loras", os.path.join(folder_paths.get_output_directory(), "loras"))
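    # With the defaults above, models placed under the output tree are found
    # automatically, e.g. (illustrative layout):
    #   output/checkpoints/model.safetensors
    #   output/loras/style.safetensors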
    if args.input_directory:
        input_dir = os.path.abspath(args.input_directory)
        logging.info(f"Setting input directory to: {input_dir}")
        folder_paths.set_input_directory(input_dir)

    if args.user_directory:
        user_dir = os.path.abspath(args.user_directory)
        logging.info(f"Setting user directory to: {user_dir}")
        folder_paths.set_user_directory(user_dir)

    if args.quick_test_for_ci:
        exit(0)

    os.makedirs(folder_paths.get_temp_directory(), exist_ok=True)
    call_on_start = None
    if args.auto_launch:
        def startup_server(scheme, address, port):
            import webbrowser
            if os.name == 'nt' and address == '0.0.0.0':
                address = '127.0.0.1'
            if ':' in address:
                address = "[{}]".format(address)
            webbrowser.open(f"{scheme}://{address}:{port}")
        call_on_start = startup_server
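    # --auto-launch opens the UI in the default browser once the server is up.
    # A 0.0.0.0 bind is rewritten to 127.0.0.1 on Windows so the URL resolves,
    # and IPv6 literals are bracketed (e.g. http://[::1]:8188).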
    try:
        loop.run_until_complete(server.setup())
        loop.run_until_complete(run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start))
    except KeyboardInterrupt:
        logging.info("\nStopped server")

    cleanup_temp()
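# Typical launch (flag names are defined in comfy.cli_args; a sketch, not the
# only supported invocation):
#   python main.py --listen 0.0.0.0 --port 8188 --auto-launch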