|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch, os, sys, subprocess, random, math, hashlib, json, time |
|
import torch.nn as nn |
|
import torchvision.transforms as transforms |
|
import numpy as np |
|
from PIL import Image, ImageFilter, ImageEnhance, ImageOps, ImageDraw, ImageChops |
|
from PIL.PngImagePlugin import PngInfo |
|
from urllib.request import urlopen |
|
|
|
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) |
|
sys.path.append('../ComfyUI') |
|
|
|
import comfy.samplers |
|
import comfy.sd |
|
import comfy.utils |
|
|
|
import comfy_extras.clip_vision |
|
|
|
import model_management |
|
import importlib |
|
|
|
import nodes |
|
|
|
|
|
MIDAS_INSTALLED = False |
|
|
|
|
|
|
|
|
|
def packages(): |
|
import sys, subprocess |
|
return [r.decode().split('==')[0] for r in subprocess.check_output([sys.executable, '-m', 'pip', 'freeze']).split()] |
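# Usage sketch (assumes pip is available for the running interpreter), matching
# how this helper is used throughout the suite:
#   if 'opencv-python' not in packages():
#       subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python'])
# Note that `pip freeze` output is split on '==', so editable/VCS installs may
# not be detected by name.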
|
|
|
|
|
def tensor2pil(image): |
|
return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)) |
|
|
|
|
|
def pil2tensor(image): |
|
return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0) |
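# Round-trip sketch for the two converters above. ComfyUI IMAGE tensors are
# float32, shaped [batch, height, width, channels] with values in 0..1:
#   pil = tensor2pil(image_tensor)      # -> PIL.Image for PIL/cv2 filters
#   image_tensor = pil2tensor(pil)      # -> [1, H, W, C] float tensor in 0..1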
|
|
|
|
|
def pil2hex(image): |
|
    return hashlib.sha256(np.array(tensor2pil(image)).astype(np.uint16).tobytes()).hexdigest()
|
|
|
|
|
def medianFilter(img, diameter, sigmaColor, sigmaSpace): |
|
import cv2 as cv |
|
diameter = int(diameter); sigmaColor = int(sigmaColor); sigmaSpace = int(sigmaSpace) |
|
img = img.convert('RGB') |
|
img = cv.cvtColor(np.array(img), cv.COLOR_RGB2BGR) |
|
img = cv.bilateralFilter(img, diameter, sigmaColor, sigmaSpace) |
|
img = cv.cvtColor(np.array(img), cv.COLOR_BGR2RGB) |
|
return Image.fromarray(img).convert('RGB') |
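# Despite its name, medianFilter wraps cv2.bilateralFilter (edge-preserving
# smoothing), not a true median filter. Hypothetical call:
#   smoothed = medianFilter(pil_img, diameter=5, sigmaColor=75, sigmaSpace=75)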
|
|
|
|
|
|
|
legacy_was_nodes = ['fDOF_WAS.py','Image_Blank_WAS.py','Image_Blend_WAS.py','Image_Canny_Filter_WAS.py', 'Canny_Filter_WAS.py','Image_Combine_WAS.py','Image_Edge_Detection_WAS.py', 'Image_Film_Grain_WAS.py', 'Image_Filters_WAS.py', 'Image_Flip_WAS.py','Image_Nova_Filter_WAS.py','Image_Rotate_WAS.py','Image_Style_Filter_WAS.py','Latent_Noise_Injection_WAS.py','Latent_Upscale_WAS.py','MiDaS_Depth_Approx_WAS.py','NSP_CLIPTextEncoder.py','Samplers_WAS.py'] |
|
legacy_was_nodes_found = [] |
|
f_disp = False |
|
for f in legacy_was_nodes: |
|
node_path_dir = os.getcwd()+'/ComfyUI/custom_nodes/' |
|
file = f'{node_path_dir}{f}' |
|
if os.path.exists(file): |
|
import zipfile |
|
if not f_disp: |
|
print('\033[34mWAS Node Suite:\033[0m Found legacy nodes. Archiving legacy nodes...') |
|
f_disp = True |
|
legacy_was_nodes_found.append(file) |
|
if legacy_was_nodes_found: |
|
from os.path import basename |
|
archive = zipfile.ZipFile(f'{node_path_dir}WAS_Legacy_Nodes_Backup_{round(time.time())}.zip', "w") |
|
for f in legacy_was_nodes_found: |
|
archive.write(f, basename(f)) |
|
try: |
|
os.remove(f) |
|
except OSError: |
|
pass |
|
archive.close() |
|
if f_disp: |
|
print('\033[34mWAS Node Suite:\033[0m Legacy cleanup complete.') |
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_Filters: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"brightness": ("FLOAT", {"default": 0.0, "min": -1.0, "max": 1.0, "step": 0.01}), |
|
"contrast": ("FLOAT", {"default": 1.0, "min": -1.0, "max": 2.0, "step": 0.01}), |
|
"saturation": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 5.0, "step": 0.01}), |
|
"sharpness": ("FLOAT", {"default": 1.0, "min": -5.0, "max": 5.0, "step": 0.01}), |
|
"blur": ("INT", {"default": 0, "min": 0, "max": 16, "step": 1}), |
|
"gaussian_blur": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1024.0, "step": 0.1}), |
|
"edge_enhance": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_filters" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_filters(self, image, brightness, contrast, saturation, sharpness, blur, gaussian_blur, edge_enhance): |
|
|
|
pil_image = None |
|
|
|
|
|
        if brightness != 0.0:

            # torch.clamp keeps the result a tensor for the PIL conversions below

            image = torch.clamp(image + brightness, 0.0, 1.0)

        if contrast != 1.0:

            image = torch.clamp(image * contrast, 0.0, 1.0)
|
|
|
|
|
        if saturation != 1.0:
|
|
|
pil_image = tensor2pil(image) |
|
|
|
pil_image = ImageEnhance.Color(pil_image).enhance(saturation) |
|
|
|
        if sharpness != 1.0:
|
|
|
pil_image = pil_image if pil_image else tensor2pil(image) |
|
|
|
pil_image = ImageEnhance.Sharpness(pil_image).enhance(sharpness) |
|
|
|
if blur > 0: |
|
|
|
pil_image = pil_image if pil_image else tensor2pil(image) |
|
|
|
for _ in range(blur): |
|
pil_image = pil_image.filter(ImageFilter.BLUR) |
|
|
|
if gaussian_blur > 0.0: |
|
|
|
pil_image = pil_image if pil_image else tensor2pil(image) |
|
|
|
pil_image = pil_image.filter(ImageFilter.GaussianBlur(radius = gaussian_blur)) |
|
|
|
if edge_enhance > 0.0: |
|
|
|
pil_image = pil_image if pil_image else tensor2pil(image) |
|
|
|
edge_enhanced_img = pil_image.filter(ImageFilter.EDGE_ENHANCE_MORE) |
|
|
|
blend_mask = Image.new(mode = "L", size = pil_image.size, color = (round(edge_enhance * 255))) |
|
|
|
pil_image = Image.composite(edge_enhanced_img, pil_image, blend_mask) |
|
|
|
del blend_mask, edge_enhanced_img |
|
|
|
|
|
out_image = ( pil2tensor(pil_image) if pil_image else image ) |
|
|
|
return ( out_image, ) |
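# Minimal usage sketch outside the ComfyUI graph (assumes `img` is a
# [1, H, W, 3] IMAGE tensor; operations apply in the order written above):
#   (out,) = WAS_Image_Filters().image_filters(
#       img, brightness=0.1, contrast=1.2, saturation=1.0,
#       sharpness=1.0, blur=0, gaussian_blur=0.0, edge_enhance=0.0)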
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_Style_Filter: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"style": ([ |
|
"1977", |
|
"aden", |
|
"brannan", |
|
"brooklyn", |
|
"clarendon", |
|
"earlybird", |
|
"gingham", |
|
"hudson", |
|
"inkwell", |
|
"kelvin", |
|
"lark", |
|
"lofi", |
|
"maven", |
|
"mayfair", |
|
"moon", |
|
"nashville", |
|
"perpetua", |
|
"reyes", |
|
"rise", |
|
"slumber", |
|
"stinson", |
|
"toaster", |
|
"valencia", |
|
"walden", |
|
"willow", |
|
"xpro2" |
|
],), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_style_filter" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_style_filter(self, image, style): |
|
|
|
|
|
if 'pilgram' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing Pilgram...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'pilgram']) |
|
|
|
|
|
import pilgram |
|
|
|
|
|
image = tensor2pil(image) |
|
|
|
|
|
match style: |
|
case "1977": |
|
out_image = pilgram._1977(image) |
|
case "aden": |
|
out_image = pilgram.aden(image) |
|
case "brannan": |
|
out_image = pilgram.brannan(image) |
|
case "brooklyn": |
|
out_image = pilgram.brooklyn(image) |
|
case "clarendon": |
|
out_image = pilgram.clarendon(image) |
|
case "earlybird": |
|
out_image = pilgram.earlybird(image) |
|
case "gingham": |
|
out_image = pilgram.gingham(image) |
|
case "hudson": |
|
out_image = pilgram.hudson(image) |
|
case "inkwell": |
|
out_image = pilgram.inkwell(image) |
|
case "kelvin": |
|
out_image = pilgram.kelvin(image) |
|
case "lark": |
|
out_image = pilgram.lark(image) |
|
case "lofi": |
|
out_image = pilgram.lofi(image) |
|
case "maven": |
|
out_image = pilgram.maven(image) |
|
case "mayfair": |
|
out_image = pilgram.mayfair(image) |
|
case "moon": |
|
out_image = pilgram.moon(image) |
|
case "nashville": |
|
out_image = pilgram.nashville(image) |
|
case "perpetua": |
|
out_image = pilgram.perpetua(image) |
|
case "reyes": |
|
out_image = pilgram.reyes(image) |
|
case "rise": |
|
out_image = pilgram.rise(image) |
|
case "slumber": |
|
out_image = pilgram.slumber(image) |
|
case "stinson": |
|
out_image = pilgram.stinson(image) |
|
case "toaster": |
|
out_image = pilgram.toaster(image) |
|
case "valencia": |
|
out_image = pilgram.valencia(image) |
|
case "walden": |
|
out_image = pilgram.walden(image) |
|
case "willow": |
|
out_image = pilgram.willow(image) |
|
case "xpro2": |
|
out_image = pilgram.xpro2(image) |
|
case _: |
|
out_image = image |
|
|
|
out_image = out_image.convert("RGB") |
|
|
|
return ( torch.from_numpy(np.array(out_image).astype(np.float32) / 255.0).unsqueeze(0), ) |
|
|
|
|
|
|
|
|
|
class WAS_Image_Blending_Mode: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image_a": ("IMAGE",), |
|
"image_b": ("IMAGE",), |
|
"mode": ([ |
|
"add", |
|
"color", |
|
"color_burn", |
|
"color_dodge", |
|
"darken", |
|
"difference", |
|
"exclusion", |
|
"hard_light", |
|
"hue", |
|
"lighten", |
|
"multiply", |
|
"overlay", |
|
"screen", |
|
"soft_light" |
|
],), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_blending_mode" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_blending_mode(self, image_a, image_b, mode): |
|
|
|
|
|
if 'pilgram' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing Pilgram...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'pilgram']) |
|
|
|
|
|
import pilgram |
|
|
|
|
|
img_a = tensor2pil(image_a) |
|
img_b = tensor2pil(image_b) |
|
|
|
|
|
match mode: |
|
case "color": |
|
out_image = pilgram.css.blending.color(img_a, img_b) |
|
case "color_burn": |
|
out_image = pilgram.css.blending.color_burn(img_a, img_b) |
|
case "color_dodge": |
|
out_image = pilgram.css.blending.color_dodge(img_a, img_b) |
|
case "darken": |
|
out_image = pilgram.css.blending.darken(img_a, img_b) |
|
case "difference": |
|
out_image = pilgram.css.blending.difference(img_a, img_b) |
|
case "exclusion": |
|
out_image = pilgram.css.blending.exclusion(img_a, img_b) |
|
case "hard_light": |
|
out_image = pilgram.css.blending.hard_light(img_a, img_b) |
|
case "hue": |
|
out_image = pilgram.css.blending.hue(img_a, img_b) |
|
case "lighten": |
|
out_image = pilgram.css.blending.lighten(img_a, img_b) |
|
case "multiply": |
|
out_image = pilgram.css.blending.multiply(img_a, img_b) |
|
case "add": |
|
out_image = pilgram.css.blending.normal(img_a, img_b) |
|
case "overlay": |
|
out_image = pilgram.css.blending.overlay(img_a, img_b) |
|
case "screen": |
|
out_image = pilgram.css.blending.screen(img_a, img_b) |
|
case "soft_light": |
|
out_image = pilgram.css.blending.soft_light(img_a, img_b) |
|
case _: |
|
out_image = img_a |
|
|
|
out_image = out_image.convert("RGB") |
|
|
|
return ( pil2tensor(out_image), ) |
|
|
|
|
|
|
|
class WAS_Image_Blend: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image_a": ("IMAGE",), |
|
"image_b": ("IMAGE",), |
|
"blend_percentage": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_blend" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_blend(self, image_a, image_b, blend_percentage): |
|
|
|
|
|
img_a = tensor2pil(image_a) |
|
img_b = tensor2pil(image_b) |
|
|
|
|
|
blend_mask = Image.new(mode = "L", size = img_a.size, color = (round(blend_percentage * 255))) |
|
blend_mask = ImageOps.invert(blend_mask) |
|
img_result = Image.composite(img_a, img_b, blend_mask) |
|
|
|
del img_a, img_b, blend_mask |
|
|
|
return ( pil2tensor(img_result), ) |
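    # With a constant mask of (1 - blend_percentage) * 255, the composite above
    # reduces to linear interpolation:
    #   out = img_a * (1 - blend_percentage) + img_b * blend_percentage
    # i.e. the same result as Image.blend(img_a, img_b, blend_percentage).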
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_Threshold: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_threshold" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
    def image_threshold(self, image, threshold=0.5):

        return ( pil2tensor(self.apply_threshold(tensor2pil(image), threshold)), )

    def apply_threshold(self, input_image, threshold=0.5):

        # Binarize against the 0..1 threshold (simple binary thresholding)

        grayscale_image = input_image.convert('L')

        threshold_value = int(threshold * 255)

        return grayscale_image.point(lambda x: 255 if x >= threshold_value else 0)
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_Chromatic_Aberration: |
|
|
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"red_offset": ("INT", {"default": 2, "min": -255, "max": 255, "step": 1}), |
|
"green_offset": ("INT", {"default": -1, "min": -255, "max": 255, "step": 1}), |
|
"blue_offset": ("INT", {"default": 1, "min": -255, "max": 255, "step": 1}), |
|
"intensity": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_chromatic_aberration" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_chromatic_aberration(self, image, red_offset=4, green_offset=2, blue_offset=0, intensity=1): |
|
return ( pil2tensor(self.apply_chromatic_aberration(tensor2pil(image), red_offset, green_offset, blue_offset, intensity)), ) |
|
|
|
|
|
def apply_chromatic_aberration(self, img, r_offset, g_offset, b_offset, intensity): |
|
|
|
r, g, b = img.split() |
|
|
|
|
|
r_offset_img = ImageChops.offset(r, r_offset, 0) |
|
g_offset_img = ImageChops.offset(g, 0, g_offset) |
|
b_offset_img = ImageChops.offset(b, 0, b_offset) |
|
|
|
|
|
blended_r = ImageChops.blend(r, r_offset_img, intensity) |
|
blended_g = ImageChops.blend(g, g_offset_img, intensity) |
|
blended_b = ImageChops.blend(b, b_offset_img, intensity) |
|
|
|
|
|
result = Image.merge("RGB", (blended_r, blended_g, blended_b)) |
|
|
|
return result |
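    # Note the offsets act along different axes: the red channel shifts
    # horizontally while green and blue shift vertically; `intensity` then
    # blends each shifted channel over the original (0 = no effect, 1 = full shift).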
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_Bloom_Filter: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"radius": ("FLOAT", {"default": 10, "min": 0.0, "max": 1024, "step": 0.1}), |
|
"intensity": ("FLOAT", {"default": 1, "min": 0.0, "max": 1.0, "step": 0.1}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_bloom" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_bloom(self, image, radius=0.5, intensity=1.0): |
|
return ( pil2tensor(self.apply_bloom_filter(tensor2pil(image), radius, intensity)), ) |
|
|
|
def apply_bloom_filter(self, input_image, radius, bloom_factor): |
|
|
|
blurred_image = input_image.filter(ImageFilter.GaussianBlur(radius=radius)) |
|
|
|
|
|
high_pass_filter = ImageChops.subtract(input_image, blurred_image) |
|
|
|
|
|
bloom_filter = high_pass_filter.filter(ImageFilter.GaussianBlur(radius=radius*2)) |
|
|
|
|
|
bloom_filter = ImageEnhance.Brightness(bloom_filter).enhance(2.0) |
|
|
|
|
|
bloom_filter = ImageChops.multiply(bloom_filter, Image.new('RGB', input_image.size, (int(255 * bloom_factor), int(255 * bloom_factor), int(255 * bloom_factor)))) |
|
|
|
|
|
blended_image = ImageChops.screen(input_image, bloom_filter) |
|
|
|
return blended_image |
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_Remove_Color: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"target_red": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
"target_green": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
"target_blue": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
"replace_red": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
"replace_green": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
"replace_blue": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
"clip_threshold": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_remove_color" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_remove_color(self, image, clip_threshold=10, target_red=255, target_green=255, target_blue=255, replace_red=255, replace_green=255, replace_blue=255): |
|
return ( pil2tensor(self.apply_remove_color(tensor2pil(image), clip_threshold, (target_red, target_green, target_blue), (replace_red, replace_green, replace_blue))), ) |
|
|
|
def apply_remove_color(self, image, threshold=10, color=(255, 255, 255), rep_color=(0, 0, 0)): |
|
|
|
color_image = Image.new('RGB', image.size, color) |
|
|
|
|
|
diff_image = ImageChops.difference(image, color_image) |
|
|
|
|
|
gray_image = diff_image.convert('L') |
|
|
|
|
|
mask_image = gray_image.point(lambda x: 255 if x > threshold else 0) |
|
|
|
|
|
mask_image = ImageOps.invert(mask_image) |
|
|
|
|
|
result_image = Image.composite(Image.new('RGB', image.size, rep_color), image, mask_image) |
|
|
|
return result_image |
|
|
|
|
|
|
|
|
|
class WAS_Image_Blend_Mask: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image_a": ("IMAGE",), |
|
"image_b": ("IMAGE",), |
|
"mask": ("IMAGE",), |
|
"blend_percentage": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_blend_mask" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_blend_mask(self, image_a, image_b, mask, blend_percentage): |
|
|
|
|
|
img_a = tensor2pil(image_a) |
|
img_b = tensor2pil(image_b) |
|
mask = ImageOps.invert(tensor2pil(mask).convert('L')) |
|
|
|
|
|
masked_img = Image.composite(img_a, img_b, mask.resize(img_a.size)) |
|
|
|
|
|
blend_mask = Image.new(mode = "L", size = img_a.size, color = (round(blend_percentage * 255))) |
|
blend_mask = ImageOps.invert(blend_mask) |
|
img_result = Image.composite(img_a, masked_img, blend_mask) |
|
|
|
del img_a, img_b, blend_mask, mask |
|
|
|
return ( pil2tensor(img_result), ) |
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_Blank: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 1}), |
|
"height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 1}), |
|
"red": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
"green": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
"blue": ("INT", {"default": 255, "min": 0, "max": 255, "step": 1}), |
|
} |
|
} |
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "blank_image" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def blank_image(self, width, height, red, green, blue): |
|
|
|
|
|
width = ( width // 8 ) * 8 |
|
height = ( height // 8 ) * 8 |
|
|
|
|
|
blank = Image.new(mode = "RGB", size = (width, height), color = (red, green, blue)) |
|
|
|
return ( pil2tensor(blank), ) |
|
|
|
|
|
|
|
|
|
class WAS_Image_High_Pass_Filter: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"radius": ("INT", {"default": 10, "min": 1, "max": 500, "step": 1}), |
|
"strength": ("FLOAT", {"default": 1.5, "min": 0.0, "max": 255.0, "step": 0.1}) |
|
} |
|
} |
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "high_pass" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def high_pass(self, image, radius=10, strength=1.5): |
|
hpf = tensor2pil(image).convert('L') |
|
return ( pil2tensor(self.apply_hpf(hpf.convert('RGB'), radius, strength)), ) |
|
|
|
def apply_hpf(self, img, radius=10, strength=1.5): |
|
|
|
|
|
img_arr = np.array(img).astype('float') |
|
|
|
|
|
blurred_arr = np.array(img.filter(ImageFilter.GaussianBlur(radius=radius))).astype('float') |
|
|
|
|
|
hpf_arr = img_arr - blurred_arr |
|
hpf_arr = np.clip(hpf_arr * strength, 0, 255).astype('uint8') |
|
|
|
|
|
return Image.fromarray(hpf_arr, mode='RGB') |
|
|
|
|
|
|
|
|
|
class WAS_Image_Levels: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"black_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max":255.0, "step": 0.1}), |
|
"mid_level": ("FLOAT", {"default": 127.5, "min": 0.0, "max": 255.0, "step": 0.1}), |
|
"white_level": ("FLOAT", {"default": 255, "min": 0.0, "max": 255.0, "step": 0.1}), |
|
} |
|
} |
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "apply_image_levels" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def apply_image_levels(self, image, black_level, mid_level, white_level): |
|
|
|
|
|
image = tensor2pil(image) |
|
|
|
|
|
|
|
|
|
levels = self.AdjustLevels(black_level, mid_level, white_level) |
|
image = levels.adjust(image) |
|
|
|
|
|
return ( pil2tensor(image), ) |
|
|
|
def adjust_levels(self, image, black=0.0, mid=1.0, white=255): |
|
""" |
|
Adjust the black, mid, and white levels of an RGB image. |
|
""" |
|
|
|
result = Image.new(image.mode, image.size) |
|
|
|
|
|
if mid < 0 or mid > 1: |
|
raise ValueError("mid value must be between 0 and 1") |
|
|
|
|
|
lut = [] |
|
for i in range(256): |
|
if i < black: |
|
lut.append(0) |
|
elif i > white: |
|
lut.append(255) |
|
else: |
|
lut.append(int(((i - black) / (white - black)) ** mid * 255.0)) |
|
|
|
|
|
r, g, b = image.split() |
|
|
|
|
|
r = r.point(lut) |
|
g = g.point(lut) |
|
b = b.point(lut) |
|
|
|
|
|
result = Image.merge("RGB", (r, g, b)) |
|
|
|
return result |
|
|
|
class AdjustLevels: |
|
def __init__(self, min_level, mid_level, max_level): |
|
self.min_level = min_level |
|
self.mid_level = mid_level |
|
self.max_level = max_level |
|
|
|
def adjust(self, im): |
|
|
|
|
|
|
|
im_arr = np.array(im) |
|
|
|
|
|
im_arr[im_arr < self.min_level] = self.min_level |
|
|
|
|
|
im_arr = (im_arr - self.min_level) * (255 / (self.max_level - self.min_level)) |
|
im_arr[im_arr < 0] = 0 |
|
im_arr[im_arr > 255] = 255 |
|
im_arr = im_arr.astype(np.uint8) |
|
|
|
|
|
im = Image.fromarray(im_arr) |
|
im = ImageOps.autocontrast(im, cutoff=self.max_level) |
|
|
|
return im |
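    # Usage sketch (levels given in 0..255):
    #   adjusted = WAS_Image_Levels.AdjustLevels(0.0, 127.5, 255.0).adjust(pil_img)
    # The remap is linear between min_level and max_level; mid_level is stored
    # but currently unused, and ImageOps.autocontrast interprets `cutoff` as a
    # percentage (0..100), so large max_level values may behave unexpectedly.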
|
|
|
|
|
|
|
|
|
class WAS_Film_Grain: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"density": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 1.0, "step": 0.01}), |
|
"intensity": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 1.0, "step": 0.01}), |
|
"highlights": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 255.0, "step": 0.01}), |
|
"supersample_factor": ("INT", {"default": 4, "min": 1, "max": 8, "step": 1}) |
|
} |
|
} |
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "film_grain" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def film_grain(self, image, density, intensity, highlights, supersample_factor): |
|
return ( pil2tensor(self.apply_film_grain(tensor2pil(image), density, intensity, highlights, supersample_factor)), ) |
|
|
|
def apply_film_grain(self, img, density=0.1, intensity=1.0, highlights=1.0, supersample_factor = 4): |
|
""" |
|
Apply grayscale noise with specified density, intensity, and highlights to a PIL image. |
|
""" |
|
|
|
img_gray = img.convert('L') |
|
|
|
|
|
original_size = img.size |
|
        img_gray = img_gray.resize(((img.size[0] * supersample_factor), (img.size[1] * supersample_factor)), Image.Resampling.BILINEAR)
|
|
|
|
|
num_pixels = int(density * img_gray.size[0] * img_gray.size[1]) |
|
|
|
|
|
noise_pixels = [] |
|
for i in range(num_pixels): |
|
x = random.randint(0, img_gray.size[0]-1) |
|
y = random.randint(0, img_gray.size[1]-1) |
|
noise_pixels.append((x, y)) |
|
|
|
|
|
for x, y in noise_pixels: |
|
value = random.randint(0, 255) |
|
img_gray.putpixel((x, y), value) |
|
|
|
|
|
img_noise = img_gray.convert('RGB') |
|
|
|
|
|
img_noise = img_noise.filter(ImageFilter.GaussianBlur(radius = 0.125)) |
|
|
|
|
|
        img_noise = img_noise.resize(original_size, Image.Resampling.LANCZOS)
|
|
|
|
|
img_noise = img_noise.filter(ImageFilter.EDGE_ENHANCE_MORE) |
|
|
|
|
|
img_final = Image.blend(img, img_noise, intensity) |
|
|
|
|
|
enhancer = ImageEnhance.Brightness(img_final) |
|
img_highlights = enhancer.enhance(highlights) |
|
|
|
|
|
return img_highlights |
|
|
|
|
|
|
|
|
|
class WAS_Image_Flip: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"mode": (["horizontal", "vertical",],), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_flip" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_flip(self, image, mode): |
|
|
|
|
|
image = tensor2pil(image) |
|
|
|
|
|
        if mode == 'horizontal':

            image = image.transpose(Image.Transpose.FLIP_LEFT_RIGHT)

        if mode == 'vertical':

            image = image.transpose(Image.Transpose.FLIP_TOP_BOTTOM)
|
|
|
return ( pil2tensor(image), ) |
|
|
|
|
|
class WAS_Image_Rotate: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"mode": (["transpose", "internal",],), |
|
"rotation": ("INT", {"default": 0, "min": 0, "max": 360, "step": 90}), |
|
"sampler": (["nearest", "bilinear", "bicubic"],), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_rotate" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_rotate(self, image, mode, rotation, sampler): |
|
|
|
|
|
image = tensor2pil(image) |
|
|
|
|
|
        if rotation > 360:

            rotation = 360

        if rotation % 90 != 0:

            rotation = int((rotation // 90) * 90)
|
|
|
|
|
        match sampler:

            case 'nearest':

                sampler = Image.Resampling.NEAREST

            case 'bicubic':

                sampler = Image.Resampling.BICUBIC

            case 'bilinear':

                sampler = Image.Resampling.BILINEAR
|
|
|
|
|
if mode == 'internal': |
|
image = image.rotate(rotation, sampler) |
|
else: |
|
rot = int(rotation / 90) |
|
for _ in range(rot): |
|
                image = image.transpose(Image.Transpose.ROTATE_90)
|
|
|
return ( torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0), ) |
|
|
|
|
|
|
|
|
|
class WAS_Image_Nova_Filter: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"amplitude": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.001}), |
|
"frequency": ("FLOAT", {"default": 3.14, "min": 0.0, "max": 100.0, "step": 0.001}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "nova_sine" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def nova_sine(self, image, amplitude, frequency): |
|
|
|
|
|
img = tensor2pil(image) |
|
|
|
|
|
img_array = np.array(img) |
|
|
|
|
|
def sine(x, freq, amp): |
|
return amp * np.sin(2 * np.pi * freq * x) |
|
|
|
|
|
resolution = img.info.get('dpi') |
|
physical_size = img.size |
|
|
|
        if resolution is not None:

            # PIL reports DPI as an (x, y) tuple; use the horizontal value

            dpi = resolution[0] if isinstance(resolution, tuple) else resolution

            ppm = 25.4 / dpi

            physical_size = tuple(int(pix * ppm) for pix in physical_size)
|
|
|
|
|
max_freq = img.width / 2 |
|
|
|
|
|
if frequency > max_freq: |
|
frequency = max_freq |
|
|
|
|
|
for i in range(img_array.shape[0]): |
|
for j in range(img_array.shape[1]): |
|
for k in range(img_array.shape[2]): |
|
img_array[i,j,k] = int(sine(img_array[i,j,k]/255, frequency, amplitude) * 255) |
|
|
|
return ( torch.from_numpy(img_array.astype(np.float32) / 255.0).unsqueeze(0), ) |
|
|
|
|
|
|
|
|
|
|
|
class WAS_Canny_Filter: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"enable_threshold": (['false', 'true'],), |
|
"threshold_low": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
"threshold_high": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "canny_filter" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def canny_filter(self, image, threshold_low, threshold_high, enable_threshold): |
|
|
|
self.install_opencv() |
|
|
|
if enable_threshold == 'false': |
|
threshold_low = None |
|
threshold_high = None |
|
|
|
        image_canny = Image.fromarray(self.Canny_detector(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8), threshold_low, threshold_high)).convert('RGB')
|
|
|
return ( pil2tensor(image_canny), ) |
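    # Sketch of the detector below: grayscale -> Gaussian blur -> Sobel
    # gradients -> non-maximum suppression along the gradient direction ->
    # double thresholding. Unset thresholds default to 10% / 50% of the peak
    # gradient magnitude.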
|
|
|
|
|
|
|
|
|
|
|
|
|
def Canny_detector(self, img, weak_th = None, strong_th = None): |
|
|
|
import cv2 |
|
|
|
|
|
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) |
|
|
|
|
|
img = cv2.GaussianBlur(img, (5, 5), 1.4) |
|
|
|
|
|
gx = cv2.Sobel(np.float32(img), cv2.CV_64F, 1, 0, 3) |
|
gy = cv2.Sobel(np.float32(img), cv2.CV_64F, 0, 1, 3) |
|
|
|
|
|
mag, ang = cv2.cartToPolar(gx, gy, angleInDegrees = True) |
|
|
|
|
|
|
|
mag_max = np.max(mag) |
|
        if not weak_th:

            weak_th = mag_max * 0.1

        if not strong_th:

            strong_th = mag_max * 0.5
|
|
|
|
|
height, width = img.shape |
|
|
|
|
|
|
|
for i_x in range(width): |
|
for i_y in range(height): |
|
|
|
grad_ang = ang[i_y, i_x] |
|
grad_ang = abs(grad_ang-180) if abs(grad_ang)>180 else abs(grad_ang) |
|
|
|
|
|
|
|
|
|
if grad_ang<= 22.5: |
|
neighb_1_x, neighb_1_y = i_x-1, i_y |
|
neighb_2_x, neighb_2_y = i_x + 1, i_y |
|
|
|
|
|
elif grad_ang>22.5 and grad_ang<=(22.5 + 45): |
|
neighb_1_x, neighb_1_y = i_x-1, i_y-1 |
|
neighb_2_x, neighb_2_y = i_x + 1, i_y + 1 |
|
|
|
|
|
elif grad_ang>(22.5 + 45) and grad_ang<=(22.5 + 90): |
|
neighb_1_x, neighb_1_y = i_x, i_y-1 |
|
neighb_2_x, neighb_2_y = i_x, i_y + 1 |
|
|
|
|
|
elif grad_ang>(22.5 + 90) and grad_ang<=(22.5 + 135): |
|
neighb_1_x, neighb_1_y = i_x-1, i_y + 1 |
|
neighb_2_x, neighb_2_y = i_x + 1, i_y-1 |
|
|
|
|
|
elif grad_ang>(22.5 + 135) and grad_ang<=(22.5 + 180): |
|
neighb_1_x, neighb_1_y = i_x-1, i_y |
|
neighb_2_x, neighb_2_y = i_x + 1, i_y |
|
|
|
|
|
if width>neighb_1_x>= 0 and height>neighb_1_y>= 0: |
|
if mag[i_y, i_x]<mag[neighb_1_y, neighb_1_x]: |
|
mag[i_y, i_x]= 0 |
|
continue |
|
|
|
if width>neighb_2_x>= 0 and height>neighb_2_y>= 0: |
|
if mag[i_y, i_x]<mag[neighb_2_y, neighb_2_x]: |
|
mag[i_y, i_x]= 0 |
|
|
|
weak_ids = np.zeros_like(img) |
|
strong_ids = np.zeros_like(img) |
|
ids = np.zeros_like(img) |
|
|
|
|
|
for i_x in range(width): |
|
for i_y in range(height): |
|
|
|
grad_mag = mag[i_y, i_x] |
|
|
|
if grad_mag<weak_th: |
|
mag[i_y, i_x]= 0 |
|
elif strong_th>grad_mag>= weak_th: |
|
ids[i_y, i_x]= 1 |
|
else: |
|
ids[i_y, i_x]= 2 |
|
|
|
|
|
|
|
        # Clamp to the displayable 8-bit range expected by Image.fromarray

        return np.clip(mag, 0, 255).astype(np.uint8)
|
|
|
def install_opencv(self): |
|
if 'opencv-python' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing CV2...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python']) |
|
|
|
|
|
|
|
|
|
class WAS_Image_Edge: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"mode": (["normal", "laplacian"],), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "image_edges" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def image_edges(self, image, mode): |
|
|
|
|
|
image = tensor2pil(image) |
|
|
|
|
|
match mode: |
|
case "normal": |
|
image = image.filter(ImageFilter.FIND_EDGES) |
|
case "laplacian": |
|
image = image.filter(ImageFilter.Kernel((3, 3), (-1, -1, -1, -1, 8, |
|
-1, -1, -1, -1), 1, 0)) |
|
case _: |
|
image = image |
|
|
|
return ( torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0), ) |
|
|
|
|
|
|
|
|
|
class WAS_Image_fDOF: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"depth": ("IMAGE",), |
|
"mode": (["mock","gaussian","box"],), |
|
"radius": ("INT", {"default": 8, "min": 1, "max": 128, "step": 1}), |
|
"samples": ("INT", {"default": 1, "min": 1, "max": 3, "step": 1}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "fdof_composite" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def fdof_composite(self, image, depth, radius, samples, mode): |
|
|
|
if 'opencv-python' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing CV2...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python']) |
|
|
|
import cv2 as cv |
|
|
|
|
|
i = 255. * image.cpu().numpy().squeeze() |
|
img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) |
|
d = 255. * depth.cpu().numpy().squeeze() |
|
depth_img = Image.fromarray(np.clip(d, 0, 255).astype(np.uint8)) |
|
|
|
|
|
fdof_image = self.portraitBlur(img, depth_img, radius, samples, mode) |
|
|
|
return ( torch.from_numpy(np.array(fdof_image).astype(np.float32) / 255.0).unsqueeze(0), ) |
|
|
|
def portraitBlur(self, img, mask, radius=5, samples=1, mode = 'mock'): |
|
mask = mask.resize(img.size).convert('L') |
|
if mode == 'mock': |
|
bimg = medianFilter(img, radius, (radius * 1500), 75) |
|
elif mode == 'gaussian': |
|
bimg = img.filter(ImageFilter.GaussianBlur(radius = radius)) |
|
elif mode == 'box': |
|
bimg = img.filter(ImageFilter.BoxBlur(radius)) |
|
        bimg = bimg.convert(img.mode)
|
rimg = None |
|
if samples > 1: |
|
for i in range(samples): |
|
if i == 0: |
|
rimg = Image.composite(img, bimg, mask) |
|
else: |
|
rimg = Image.composite(rimg, bimg, mask) |
|
else: |
|
rimg = Image.composite(img, bimg, mask).convert('RGB') |
|
|
|
return rimg |
|
|
|
|
|
def lens_blur(img, radius, amount, mask=None): |
|
"""Applies a lens shape blur effect on an image. |
|
|
|
Args: |
|
img (numpy.ndarray): The input image as a numpy array. |
|
radius (float): The radius of the lens shape. |
|
amount (float): The amount of blur to be applied. |
|
mask (numpy.ndarray): An optional mask image specifying where to apply the blur. |
|
|
|
Returns: |
|
numpy.ndarray: The blurred image as a numpy array. |
|
""" |
|
|
|
    import cv2

    kernel = cv2.getGaussianKernel(ksize=int(radius * 10), sigma=0)
|
kernel = np.dot(kernel, kernel.T) |
|
|
|
|
|
kernel /= np.max(kernel) |
|
|
|
|
|
mask_shape = (int(radius * 2), int(radius * 2)) |
|
mask = np.ones(mask_shape) if mask is None else cv2.resize(mask, mask_shape, interpolation=cv2.INTER_LINEAR) |
|
mask = cv2.GaussianBlur(mask, (int(radius * 2) + 1, int(radius * 2) + 1), radius / 2) |
|
mask /= np.max(mask) |
|
|
|
|
|
ksize_x = img.shape[1] // (kernel.shape[1] + 1) |
|
ksize_y = img.shape[0] // (kernel.shape[0] + 1) |
|
kernel = cv2.resize(kernel, (ksize_x, ksize_y), interpolation=cv2.INTER_LINEAR) |
|
kernel = cv2.copyMakeBorder(kernel, 0, img.shape[0] - kernel.shape[0], 0, img.shape[1] - kernel.shape[1], cv2.BORDER_CONSTANT, value=0) |
|
mask = cv2.resize(mask, (ksize_x, ksize_y), interpolation=cv2.INTER_LINEAR) |
|
mask = cv2.copyMakeBorder(mask, 0, img.shape[0] - mask.shape[0], 0, img.shape[1] - mask.shape[1], cv2.BORDER_CONSTANT, value=0) |
|
|
|
|
|
blurred = cv2.filter2D(img, -1, kernel) |
|
blurred = cv2.filter2D(blurred, -1, mask * amount) |
|
|
|
if mask is not None: |
|
|
|
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR) |
|
img_masked = img * mask |
|
|
|
blurred = img_masked * (1 - mask) + blurred |
|
|
|
return blurred |
|
|
|
|
|
|
|
|
|
class WAS_Image_Median_Filter: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"diameter": ("INT", {"default": 2.0, "min": 0.1, "max": 255, "step": 1}), |
|
"sigma_color": ("FLOAT", {"default": 10.0, "min": -255.0, "max": 255.0, "step": 0.1}), |
|
"sigma_space": ("FLOAT", {"default": 10.0, "min": -255.0, "max": 255.0, "step": 0.1}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "apply_median_filter" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def apply_median_filter(self, image, diameter, sigma_color, sigma_space): |
|
|
|
|
|
image = tensor2pil(image) |
|
|
|
|
|
image = medianFilter(image, diameter, sigma_color, sigma_space) |
|
|
|
return ( pil2tensor(image), ) |
|
|
|
|
|
|
|
class WAS_Image_Select_Color: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"red": ("INT", {"default": 255.0, "min": 0.0, "max": 255.0, "step": 0.1}), |
|
"green": ("INT", {"default": 255.0, "min": 0.0, "max": 255.0, "step": 0.1}), |
|
"blue": ("INT", {"default": 255.0, "min": 0.0, "max": 255.0, "step": 0.1}), |
|
"variance": ("INT", {"default": 10, "min": 0, "max": 255, "step": 1}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "select_color" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def select_color(self, image, red=255, green=255, blue=255, variance=10): |
|
|
|
if 'opencv-python' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing CV2...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python']) |
|
|
|
image = self.color_pick(tensor2pil(image), red, green, blue, variance) |
|
|
|
return ( pil2tensor(image), ) |
|
|
|
|
|
def color_pick(self, image, red=255, green=255, blue=255, variance=10): |
|
|
|
image = image.convert('RGB') |
|
|
|
|
|
selected_color = Image.new('RGB', image.size, (0,0,0)) |
|
|
|
|
|
width, height = image.size |
|
|
|
|
|
for x in range(width): |
|
for y in range(height): |
|
|
|
pixel = image.getpixel((x,y)) |
|
r,g,b = pixel |
|
|
|
|
|
if ((r >= red-variance) and (r <= red+variance) and |
|
(g >= green-variance) and (g <= green+variance) and |
|
(b >= blue-variance) and (b <= blue+variance)): |
|
|
|
selected_color.putpixel((x,y),(r,g,b)) |
|
|
|
|
|
return selected_color |
|
|
|
|
|
|
|
class WAS_Image_Select_Channel: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"channel": (['red','green','blue'],), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "select_channel" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def select_channel(self, image, channel='red'): |
|
|
|
image = self.convert_to_single_channel(tensor2pil(image), channel) |
|
|
|
return ( pil2tensor(image), ) |
|
|
|
|
|
def convert_to_single_channel(self, image, channel='red'): |
|
|
|
|
|
image = image.convert('RGB') |
|
|
|
|
|
if channel == 'red': |
|
channel_img = image.split()[0].convert('L') |
|
elif channel == 'green': |
|
channel_img = image.split()[1].convert('L') |
|
elif channel == 'blue': |
|
channel_img = image.split()[2].convert('L') |
|
else: |
|
raise ValueError("Invalid channel option. Please choose 'red', 'green', or 'blue'.") |
|
|
|
|
|
channel_img = Image.merge('RGB', (channel_img, channel_img, channel_img)) |
|
|
|
return channel_img |
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_RGB_Merge: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"red_channel": ("IMAGE",), |
|
"green_channel": ("IMAGE",), |
|
"blue_channel": ("IMAGE",), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "merge_channels" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def merge_channels(self, red_channel, green_channel, blue_channel): |
|
|
|
|
|
image = self.mix_rgb_channels(tensor2pil(red_channel).convert('L'), tensor2pil(green_channel).convert('L'), tensor2pil(blue_channel).convert('L')) |
|
|
|
return ( pil2tensor(image), ) |
|
|
|
|
|
def mix_rgb_channels(self, red, green, blue): |
|
|
|
        merged_img = Image.merge('RGB', (red, green, blue))
|
|
|
return merged_img |
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_Save: |
|
def __init__(self): |
|
        self.output_dir = os.path.join(os.getcwd(), 'ComfyUI', 'output')
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"images": ("IMAGE", ), |
|
"output_path": ("STRING", {"default": './ComfyUI/output', "multiline": False}), |
|
"filename_prefix": ("STRING", {"default": "ComfyUI"}), |
|
"extension": (['png', 'jpeg', 'tiff', 'gif'], ), |
|
"quality": ("INT", {"default": 100, "min": 1, "max": 100, "step": 1}), |
|
}, |
|
"hidden": { |
|
"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO" |
|
}, |
|
} |
|
|
|
RETURN_TYPES = () |
|
FUNCTION = "save_images" |
|
|
|
OUTPUT_NODE = True |
|
|
|
CATEGORY = "WAS Suite/IO" |
|
|
|
def save_images(self, images, output_path='', filename_prefix="ComfyUI", extension='png', quality=100, prompt=None, extra_pnginfo=None): |
|
def map_filename(filename): |
|
prefix_len = len(filename_prefix) |
|
prefix = filename[:prefix_len + 1] |
|
try: |
|
digits = int(filename[prefix_len + 1:].split('_')[0]) |
|
            except ValueError:
|
digits = 0 |
|
return (digits, prefix) |
|
|
|
|
|
if output_path.strip() != '': |
|
if not os.path.exists(output_path.strip()): |
|
print(f'\033[34mWAS NS\033[0m Error: The path `{output_path.strip()}` specified doesn\'t exist! Defaulting to `{self.output_dir}` directory.') |
|
else: |
|
self.output_dir = os.path.normpath(output_path.strip()) |
|
print(self.output_dir) |
|
|
|
|
|
try: |
|
counter = max(filter(lambda a: a[1][:-1] == filename_prefix and a[1][-1] == "_", map(map_filename, os.listdir(self.output_dir))))[0] + 1 |
|
except ValueError: |
|
counter = 1 |
|
except FileNotFoundError: |
|
os.mkdir(self.output_dir) |
|
counter = 1 |
|
|
|
paths = list() |
|
for image in images: |
|
i = 255. * image.cpu().numpy() |
|
img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) |
|
metadata = PngInfo() |
|
if prompt is not None: |
|
metadata.add_text("prompt", json.dumps(prompt)) |
|
if extra_pnginfo is not None: |
|
for x in extra_pnginfo: |
|
metadata.add_text(x, json.dumps(extra_pnginfo[x])) |
|
file = f"{filename_prefix}_{counter:05}_.{extension}" |
|
if extension == 'png': |
|
img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True) |
|
elif extension == 'webp': |
|
img.save(os.path.join(self.output_dir, file), quality=quality) |
|
elif extension == 'jpeg': |
|
img.save(os.path.join(self.output_dir, file), quality=quality, optimize=True) |
|
elif extension == 'tiff': |
|
img.save(os.path.join(self.output_dir, file), quality=quality, optimize=True) |
|
else: |
|
img.save(os.path.join(self.output_dir, file)) |
|
paths.append(file) |
|
counter += 1 |
|
return { "ui": { "images": paths } } |
|
|
|
|
|
|
|
class WAS_Load_Image: |
|
|
|
def __init__(self): |
|
        self.input_dir = os.path.join(os.getcwd(), 'ComfyUI', 'input')
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return {"required": |
|
{"image_path": ("STRING", {"default": './ComfyUI/input/example.png', "multiline": False}),} |
|
} |
|
|
|
CATEGORY = "WAS Suite/IO" |
|
|
|
RETURN_TYPES = ("IMAGE", "MASK") |
|
FUNCTION = "load_image" |
|
def load_image(self, image_path): |
|
try: |
|
i = Image.open(image_path) |
|
except OSError: |
|
            print(f'\033[34mWAS NS\033[0m Error: The image `{image_path}` specified doesn\'t exist!')
|
i = Image.new(mode='RGB', size=(512,512), color=(0,0,0)) |
|
image = i.convert("RGB") |
|
image = np.array(image).astype(np.float32) / 255.0 |
|
image = torch.from_numpy(image)[None,] |
|
if 'A' in i.getbands(): |
|
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 |
|
mask = 1. - torch.from_numpy(mask) |
|
else: |
|
mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") |
|
return (image, mask) |
|
|
|
@classmethod |
|
def IS_CHANGED(s, image_path): |
|
m = hashlib.sha256() |
|
with open(image_path, 'rb') as f: |
|
m.update(f.read()) |
|
return m.digest().hex() |
|
|
|
|
|
|
|
|
|
class WAS_Tensor_Batch_to_Image: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"images_batch": ("IMAGE",), |
|
"batch_image_number": ("INT", {"default": 0, "min": 0, "max": 64, "step": 1}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "tensor_batch_to_image" |
|
|
|
CATEGORY = "WAS Suite/Latent" |
|
|
|
    def tensor_batch_to_image(self, images_batch=None, batch_image_number=0):

        if batch_image_number < len(images_batch):

            return ( images_batch[batch_image_number].unsqueeze(0), )

        print(f"\033[34mWAS NS\033[0m Error: Batch number `{batch_image_number}` is not defined, returning last image")

        return ( images_batch[-1].unsqueeze(0), )
|
|
|
|
|
|
|
|
|
|
|
|
|
class WAS_Image_To_Mask: |
|
|
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return {"required": |
|
{"image": ("IMAGE",), |
|
"channel": (["alpha", "red", "green", "blue"], ),} |
|
} |
|
|
|
CATEGORY = "WAS Suite/Latent" |
|
|
|
RETURN_TYPES = ("MASK",) |
|
|
|
FUNCTION = "image_to_mask" |
|
|
|
def image_to_mask(self, image, channel): |
|
|
|
img = tensor2pil(image) |
|
|
|
mask = None |
|
c = channel[0].upper() |
|
if c in img.getbands(): |
|
mask = np.array(img.getchannel(c)).astype(np.float32) / 255.0 |
|
mask = torch.from_numpy(mask) |
|
if c == 'A': |
|
mask = 1. - mask |
|
else: |
|
mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") |
|
|
|
return ( mask, ) |
|
|
|
|
|
|
|
|
|
class WAS_Latent_Upscale: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return {"required": { "samples": ("LATENT",), "mode": (["bilinear", "bicubic", "trilinear"],), |
|
"factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 8.0, "step": 0.1}), |
|
"align": (["true", "false"], )}} |
|
RETURN_TYPES = ("LATENT",) |
|
FUNCTION = "latent_upscale" |
|
|
|
CATEGORY = "WAS Suite/Latent" |
|
|
|
def latent_upscale(self, samples, mode, factor, align): |
|
s = samples.copy() |
|
s["samples"] = torch.nn.functional.interpolate(s['samples'], scale_factor=factor, mode=mode, align_corners=( True if align == 'true' else False )) |
|
return (s,) |
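# Shape sketch: a LATENT dict holds {"samples": tensor [B, 4, H//8, W//8]} for
# Stable Diffusion style latents, so factor=2.0 on a 512x512 image's latent
# takes [1, 4, 64, 64] -> [1, 4, 128, 128].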
|
|
|
|
|
|
|
class WAS_Latent_Noise: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"samples": ("LATENT",), |
|
"noise_std": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
} |
|
} |
|
|
|
RETURN_TYPES = ("LATENT",) |
|
FUNCTION = "inject_noise" |
|
|
|
CATEGORY = "WAS Suite/Latent" |
|
|
|
def inject_noise(self, samples, noise_std): |
|
s = samples.copy() |
|
noise = torch.randn_like(s["samples"]) * noise_std |
|
s["samples"] = s["samples"] + noise |
|
return (s,) |
|
|
|
|
|
|
|
|
|
class MiDaS_Depth_Approx: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"use_cpu": (["false", "true"],), |
|
"midas_model": (["DPT_Large", "DPT_Hybrid", "DPT_Small"],), |
|
"invert_depth": (["false", "true"],), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE",) |
|
FUNCTION = "midas_approx" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def midas_approx(self, image, use_cpu, midas_model, invert_depth): |
|
|
|
global MIDAS_INSTALLED |
|
|
|
if not MIDAS_INSTALLED: |
|
self.install_midas() |
|
|
|
import cv2 as cv |
|
|
|
|
|
        i = 255. * image.cpu().numpy().squeeze()

        # cv2 and the MiDaS transforms expect a uint8 RGB array

        img = np.clip(i, 0, 255).astype(np.uint8)
|
|
|
print("\033[34mWAS NS:\033[0m Downloading and loading MiDaS Model...") |
|
midas = torch.hub.load("intel-isl/MiDaS", midas_model, trust_repo=True) |
|
device = torch.device("cuda") if torch.cuda.is_available() and use_cpu == 'false' else torch.device("cpu") |
|
|
|
print('\033[34mWAS NS:\033[0m MiDaS is using device:', device) |
|
|
|
midas.to(device).eval() |
|
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") |
|
|
|
if midas_model == "DPT_Large" or midas_model == "DPT_Hybrid": |
|
transform = midas_transforms.dpt_transform |
|
else: |
|
transform = midas_transforms.small_transform |
|
|
|
img = cv.cvtColor(img, cv.COLOR_BGR2RGB) |
|
input_batch = transform(img).to(device) |
|
|
|
print('\033[34mWAS NS:\033[0m Approximating depth from image.') |
|
|
|
with torch.no_grad(): |
|
prediction = midas(input_batch) |
|
prediction = torch.nn.functional.interpolate( |
|
prediction.unsqueeze(1), |
|
size=img.shape[:2], |
|
mode="bicubic", |
|
align_corners=False, |
|
).squeeze() |
|
|
|
        depth = prediction.cpu().numpy().astype(np.float32)

        # Normalize to 0..1; casting the unbounded MiDaS output straight to uint8 would wrap values above 255

        depth = depth / np.max(depth)

        if invert_depth == 'true':

            depth = 1.0 - depth
|
|
|
depth = cv.cvtColor(depth, cv.COLOR_GRAY2RGB) |
|
|
|
tensor = torch.from_numpy( depth )[None,] |
|
tensors = ( tensor, ) |
|
|
|
del midas, device, midas_transforms |
|
del transform, img, input_batch, prediction |
|
|
|
return tensors |
|
|
|
def install_midas(self): |
|
global MIDAS_INSTALLED |
|
if 'timm' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing timm...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'timm']) |
|
if 'opencv-python' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing CV2...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python']) |
|
MIDAS_INSTALLED = True |
|
|
|
|
|
|
|
class MiDaS_Background_Foreground_Removal: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(cls): |
|
return { |
|
"required": { |
|
"image": ("IMAGE",), |
|
"use_cpu": (["false", "true"],), |
|
"midas_model": (["DPT_Large", "DPT_Hybrid", "DPT_Small"],), |
|
"remove": (["background", "foregroud"],), |
|
"threshold": (["false", "true"],), |
|
"threshold_low": ("FLOAT", {"default": 10, "min": 0, "max": 255, "step": 1}), |
|
"threshold_mid": ("FLOAT", {"default": 200, "min": 0, "max": 255, "step": 1}), |
|
"threshold_high": ("FLOAT", {"default": 210, "min": 0, "max": 255, "step": 1}), |
|
"smoothing": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 16.0, "step": 0.01}), |
|
"background_red": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), |
|
"background_green": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), |
|
"background_blue": ("INT", {"default": 0, "min": 0, "max": 255, "step": 1}), |
|
}, |
|
} |
|
|
|
RETURN_TYPES = ("IMAGE","IMAGE") |
|
FUNCTION = "midas_remove" |
|
|
|
CATEGORY = "WAS Suite/Image" |
|
|
|
def midas_remove(self, |
|
image, |
|
midas_model, |
|
use_cpu='false', |
|
remove='background', |
|
threshold='false', |
|
threshold_low=0, |
|
threshold_mid=127, |
|
threshold_high=255, |
|
smoothing=0.25, |
|
background_red=0, |
|
background_green=0, |
|
background_blue=0): |
|
|
|
global MIDAS_INSTALLED |
|
|
|
if not MIDAS_INSTALLED: |
|
self.install_midas() |
|
|
|
import cv2 as cv |
|
|
|
|
|
        i = 255. * image.cpu().numpy().squeeze()

        img = np.clip(i, 0, 255).astype(np.uint8)
|
|
|
img_original = tensor2pil(image).convert('RGB') |
|
|
|
print("\033[34mWAS NS:\033[0m Downloading and loading MiDaS Model...") |
|
midas = torch.hub.load("intel-isl/MiDaS", midas_model, trust_repo=True) |
|
device = torch.device("cuda") if torch.cuda.is_available() and use_cpu == 'false' else torch.device("cpu") |
|
|
|
print('\033[34mWAS NS:\033[0m MiDaS is using device:', device) |
|
|
|
midas.to(device).eval() |
|
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") |
|
|
|
if midas_model == "DPT_Large" or midas_model == "DPT_Hybrid": |
|
transform = midas_transforms.dpt_transform |
|
else: |
|
transform = midas_transforms.small_transform |
|
|
|
img = cv.cvtColor(img, cv.COLOR_BGR2RGB) |
|
input_batch = transform(img).to(device) |
|
|
|
print('\033[34mWAS NS:\033[0m Approximating depth from image.') |
|
|
|
with torch.no_grad(): |
|
prediction = midas(input_batch) |
|
prediction = torch.nn.functional.interpolate( |
|
prediction.unsqueeze(1), |
|
size=img.shape[:2], |
|
mode="bicubic", |
|
align_corners=False, |
|
).squeeze() |
|
|
|
|
|
        depth = prediction.cpu().numpy().astype(np.float32)

        depth = depth / np.max(depth)

        if remove == 'foreground':

            depth = 1.0 - depth

        depth = Image.fromarray(np.uint8(depth * 255))
|
|
|
|
|
if threshold == 'true': |
|
levels = self.AdjustLevels(threshold_low, threshold_mid, threshold_high) |
|
depth = levels.adjust(depth.convert('RGB')).convert('L') |
|
if smoothing > 0: |
|
depth = depth.filter(ImageFilter.GaussianBlur(radius=smoothing)) |
|
depth = depth.resize(img_original.size).convert('L') |
|
|
|
|
|
background_red = int(background_red) if isinstance(background_red, (int, float)) else 0 |
|
background_green = int(background_green) if isinstance(background_green, (int, float)) else 0 |
|
background_blue = int(background_blue) if isinstance(background_blue, (int, float)) else 0 |
|
|
|
|
|
background_color = ( background_red, background_green, background_blue ) |
|
|
|
|
|
background = Image.new(mode="RGB", size=img_original.size, color=background_color) |
|
|
|
|
|
result_img = Image.composite(img_original, background, depth) |
|
|
|
del midas, device, midas_transforms |
|
del transform, img, img_original, input_batch, prediction |
|
|
|
return ( pil2tensor(result_img), pil2tensor(depth.convert('RGB')) ) |
|
|
|
class AdjustLevels: |
|
def __init__(self, min_level, mid_level, max_level): |
|
self.min_level = min_level |
|
self.mid_level = mid_level |
|
self.max_level = max_level |
|
|
|
def adjust(self, im): |
|
|
|
|
|
|
|
im_arr = np.array(im) |
|
|
|
|
|
im_arr[im_arr < self.min_level] = self.min_level |
|
|
|
|
|
im_arr = (im_arr - self.min_level) * (255 / (self.max_level - self.min_level)) |
|
im_arr[im_arr < 0] = 0 |
|
im_arr[im_arr > 255] = 255 |
|
im_arr = im_arr.astype(np.uint8) |
|
|
|
|
|
im = Image.fromarray(im_arr) |
|
im = ImageOps.autocontrast(im, cutoff=self.max_level) |
|
|
|
return im |
|
|
|
def install_midas(self): |
|
global MIDAS_INSTALLED |
|
if 'timm' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing timm...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'timm']) |
|
if 'opencv-python' not in packages(): |
|
print("\033[34mWAS NS:\033[0m Installing CV2...") |
|
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', 'opencv-python']) |
|
MIDAS_INSTALLED = True |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class WAS_NSP_CLIPTextEncoder: |
|
def __init__(self): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"noodle_key": ("STRING", {"default": '__', "multiline": False}), |
|
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), |
|
"text": ("STRING", {"multiline": True}), |
|
"clip": ("CLIP",), |
|
} |
|
} |
|
|
|
OUTPUT_NODE = True |
|
RETURN_TYPES = ("CONDITIONING",) |
|
FUNCTION = "nsp_encode" |
|
|
|
CATEGORY = "WAS Suite/Conditioning" |
|
|
|
def nsp_encode(self, clip, text, noodle_key = '__', seed = 0): |
|
|
|
|
|
local_pantry = os.getcwd()+'/ComfyUI/custom_nodes/nsp_pantry.json' |
|
if not os.path.exists(local_pantry): |
|
response = urlopen('https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json') |
|
tmp_pantry = json.loads(response.read()) |
|
|
|
pantry_serialized = json.dumps(tmp_pantry, indent=4) |
|
with open(local_pantry, "w") as f: |
|
f.write(pantry_serialized) |
|
del response, tmp_pantry |
|
|
|
|
|
with open(local_pantry, 'r') as f: |
|
nspterminology = json.load(f) |
|
|
|
        if seed > 0:
|
random.seed(seed) |
|
|
|
|
|
new_text = text |
|
for term in nspterminology: |
|
|
|
tkey = f'{noodle_key}{term}{noodle_key}' |
|
|
|
tcount = new_text.count(tkey) |
|
|
|
for _ in range(tcount): |
|
new_text = new_text.replace(tkey, random.choice(nspterminology[term]), 1) |
|
seed = seed+1 |
|
random.seed(seed) |
|
|
|
print('\033[34mWAS NS\033[0m CLIPTextEncode NSP:', new_text) |
|
|
|
return ([[clip.encode(new_text), {}]],{"ui":{"prompt":new_text}}) |
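# Noodle Soup Prompts sketch: with noodle_key '__', a prompt such as
# "a __adj__ landscape" has each __term__ looked up in nsp_pantry.json and
# replaced by a random entry ('adj' here is a hypothetical pantry key).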
|
|
|
|
|
|
|
|
|
|
|
|
|
class WAS_KSampler: |
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return {"required": |
|
{"model": ("MODEL",), |
|
"seed": ("SEED",), |
|
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), |
|
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}), |
|
"sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), |
|
"scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), |
|
"positive": ("CONDITIONING", ), |
|
"negative": ("CONDITIONING", ), |
|
"latent_image": ("LATENT", ), |
|
"denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), |
|
} |
|
} |
|
|
|
RETURN_TYPES = ("LATENT",) |
|
FUNCTION = "sample" |
|
|
|
CATEGORY = "WAS Suite/Sampling" |
|
|
|
def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0): |
|
return nodes.common_ksampler(model, seed['seed'], steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) |
|
|
|
|
|
|
|
class WAS_Seed: |
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return {"required": |
|
{"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})} |
|
} |
|
|
|
|
|
RETURN_TYPES = ("SEED",) |
|
FUNCTION = "seed" |
|
|
|
CATEGORY = "WAS Suite/Constant" |
|
|
|
def seed(self, seed): |
|
return ( {"seed": seed,}, ) |
|
|
|
|
|
|
|
|
|
|
|
|
|
class WAS_Text_Multiline: |
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"text": ("STRING", {"default": '', "multiline": True}), |
|
} |
|
} |
|
RETURN_TYPES = ("ASCII",) |
|
FUNCTION = "text_multiline" |
|
|
|
CATEGORY = "WAS Suite/Text" |
|
|
|
def text_multiline(self, text): |
|
return ( text, ) |
|
|
|
|
|
|
|
|
|
class WAS_Text_String: |
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"text": ("STRING", {"default": '', "multiline": False}), |
|
} |
|
} |
|
RETURN_TYPES = ("ASCII",) |
|
FUNCTION = "text_string" |
|
|
|
CATEGORY = "WAS Suite/Text" |
|
|
|
def text_string(self, text): |
|
return ( text, ) |
|
|
|
|
|
|
|
|
|
class WAS_Text_Random_Line: |
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"text": ("ASCII",), |
|
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), |
|
} |
|
} |
|
|
|
RETURN_TYPES = ("ASCII",) |
|
FUNCTION = "text_random_line" |
|
|
|
CATEGORY = "WAS Suite/Text" |
|
|
|
def text_random_line(self, text, seed): |
|
lines = text.split("\n") |
|
random.seed(seed) |
|
choice = random.choice(lines) |
|
print('\033[34mWAS NS\033[0m Random Line:', choice) |
|
return ( choice, ) |
|
|
|
|
|
|
|
|
|
class WAS_Text_Concatenate: |
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"text_a": ("ASCII",), |
|
"text_b": ("ASCII",), |
|
"linebreak_addition": (['true','false'], ), |
|
} |
|
} |
|
|
|
RETURN_TYPES = ("ASCII",) |
|
FUNCTION = "text_concatenate" |
|
|
|
CATEGORY = "WAS Suite/Text" |
|
|
|
def text_concatenate(self, text_a, text_b, linebreak_addition): |
|
return ( text_a + ("\n" if linebreak_addition == 'true' else '') + text_b, ) |
|
|
|
|
|
|
|
|
|
class WAS_Search_and_Replace: |
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"text": ("ASCII",), |
|
"find": ("STRING", {"default": '', "multiline": False}), |
|
"replace": ("STRING", {"default": '', "multiline": False}), |
|
} |
|
} |
|
|
|
RETURN_TYPES = ("ASCII",) |
|
FUNCTION = "text_search_and_replace" |
|
|
|
CATEGORY = "WAS Suite/Text" |
|
|
|
def text_search_and_replace(self, text, find, replace): |
|
return ( self.replace_substring(text, find, replace), ) |
|
|
|
def replace_substring(self, text, find, replace): |
|
import re |
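        # Note: `find` is compiled as a regular expression pattern, so regex
        # metacharacters in the search text are interpreted, not matched literally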
|
text = re.sub(find, replace, text) |
|
return text |
|
|
|
|
|
|
|
|
|
class WAS_Text_Parse_NSP: |
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"noodle_key": ("STRING", {"default": '__', "multiline": False}), |
|
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), |
|
"text": ("ASCII",), |
|
} |
|
} |
|
|
|
OUTPUT_NODE = True |
|
RETURN_TYPES = ("ASCII",) |
|
FUNCTION = "text_parse_nsp" |
|
|
|
CATEGORY = "WAS Suite/Text" |
|
|
|
def text_parse_nsp(self, text, noodle_key = '__', seed = 0): |
|
|
|
|
|
local_pantry = os.getcwd()+'/ComfyUI/custom_nodes/nsp_pantry.json' |
|
if not os.path.exists(local_pantry): |
|
response = urlopen('https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json') |
|
tmp_pantry = json.loads(response.read()) |
|
|
|
pantry_serialized = json.dumps(tmp_pantry, indent=4) |
|
with open(local_pantry, "w") as f: |
|
f.write(pantry_serialized) |
|
del response, tmp_pantry |
|
|
|
|
|
with open(local_pantry, 'r') as f: |
|
nspterminology = json.load(f) |
|
|
|
        if seed > 0:
|
random.seed(seed) |
|
|
|
|
|
new_text = text |
|
for term in nspterminology: |
|
|
|
tkey = f'{noodle_key}{term}{noodle_key}' |
|
|
|
tcount = new_text.count(tkey) |
|
|
|
for _ in range(tcount): |
|
new_text = new_text.replace(tkey, random.choice(nspterminology[term]), 1) |
|
seed = seed+1 |
|
random.seed(seed) |
|
|
|
print('\033[34mWAS NS\033[0m Text Parse NSP:', new_text) |
|
|
|
return ( new_text, ) |
|
|
|
|
|
|
|
|
|
class WAS_Text_Save: |
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"text": ("ASCII",), |
|
"path": ("STRING", {"default": '', "multiline": False}), |
|
"filename": ("STRING", {"default": f'text_[time]', "multiline": False}), |
|
} |
|
} |
|
|
|
OUTPUT_NODE = True |
|
RETURN_TYPES = () |
|
FUNCTION = "save_text_file" |
|
|
|
CATEGORY = "WAS Suite/Text" |
|
|
|
def save_text_file(self, text, path, filename): |
|
|
|
|
|
if not os.path.exists(path): |
|
print(f'\033[34mWAS NS\033[0m Error: The path `{path}` doesn\'t exist!') |
|
|
|
|
|
        if text.strip() == '':
|
print(f'\033[34mWAS NS\033[0m Error: There is no text specified to save! Text is empty.') |
|
|
|
|
|
tokens = { |
|
'[time]': f'{round(time.time())}', |
|
} |
|
for k in tokens.keys(): |
|
text = self.replace_substring(text, k, tokens[k]) |
|
|
|
|
|
self.writeTextFile(os.path.join(path, filename + '.txt'), text) |
|
|
|
        return ( text, )
|
|
|
|
|
def writeTextFile(self, file, content): |
|
try: |
|
with open(file, 'w') as f: |
|
f.write(content) |
|
except OSError: |
|
print(f'\033[34mWAS Node Suite\033[0m Error: Unable to save file `{file}`') |
|
|
|
|
|
|
|
    def replace_substring(self, text, find, replace):

        import re

        # Escape the token so characters like '[' and ']' match literally

        text = re.sub(re.escape(find), replace, text)

        return text
|
|
|
|
|
|
|
|
|
class WAS_Text_to_Conditioning: |
|
def __init__(s): |
|
pass |
|
|
|
@classmethod |
|
def INPUT_TYPES(s): |
|
return { |
|
"required": { |
|
"clip": ("CLIP",), |
|
"text": ("ASCII",), |
|
} |
|
} |
|
|
|
RETURN_TYPES = ("CONDITIONING",) |
|
FUNCTION = "text_to_conditioning" |
|
|
|
CATEGORY = "WAS Suite/Text" |
|
|
|
def text_to_conditioning(self, clip, text): |
|
return ( [[clip.encode(text), {}]], ) |
|
|
|
|
|
|
|
|
|
NODE_CLASS_MAPPINGS = { |
|
|
|
"Image Filter Adjustments": WAS_Image_Filters, |
|
"Image Style Filter": WAS_Image_Style_Filter, |
|
"Image Blending Mode": WAS_Image_Blending_Mode, |
|
"Image Blend": WAS_Image_Blend, |
|
"Image Blend by Mask": WAS_Image_Blend_Mask, |
|
"Image Remove Color": WAS_Image_Remove_Color, |
|
"Image Threshold": WAS_Image_Threshold, |
|
"Image Chromatic Aberration": WAS_Image_Chromatic_Aberration, |
|
"Image Bloom Filter": WAS_Image_Bloom_Filter, |
|
"Image Blank": WAS_Image_Blank, |
|
"Image Film Grain": WAS_Film_Grain, |
|
"Image Flip": WAS_Image_Flip, |
|
"Image Rotate": WAS_Image_Rotate, |
|
"Image Nova Filter": WAS_Image_Nova_Filter, |
|
"Image Canny Filter": WAS_Canny_Filter, |
|
"Image Edge Detection Filter": WAS_Image_Edge, |
|
"Image fDOF Filter": WAS_Image_fDOF, |
|
"Image Median Filter": WAS_Image_Median_Filter, |
|
"Image Save": WAS_Image_Save, |
|
"Image Load": WAS_Load_Image, |
|
"Image Levels Adjustment": WAS_Image_Levels, |
|
"Image High Pass Filter": WAS_Image_High_Pass_Filter, |
|
"Tensor Batch to Image": WAS_Tensor_Batch_to_Image, |
|
"Image Select Color": WAS_Image_Select_Color, |
|
"Image Select Channel": WAS_Image_Select_Channel, |
|
"Image Mix RGB Channels": WAS_Image_RGB_Merge, |
|
|
|
"Latent Upscale by Factor (WAS)": WAS_Latent_Upscale, |
|
"Latent Noise Injection": WAS_Latent_Noise, |
|
"Image to Latent Mask": WAS_Image_To_Mask, |
|
|
|
"MiDaS Depth Approximation": MiDaS_Depth_Approx, |
|
"MiDaS Mask Image": MiDaS_Background_Foreground_Removal, |
|
|
|
"CLIPTextEncode (NSP)": WAS_NSP_CLIPTextEncoder, |
|
|
|
"KSampler (WAS)": WAS_KSampler, |
|
"Seed": WAS_Seed, |
|
|
|
"Text Multiline": WAS_Text_Multiline, |
|
"Text String": WAS_Text_String, |
|
"Text Random Line": WAS_Text_Random_Line, |
|
"Text to Conditioning": WAS_Text_to_Conditioning, |
|
"Text Concatenate": WAS_Text_Concatenate, |
|
"Text Find and Replace": WAS_Search_and_Replace, |
|
"Text Parse Noodle Soup Prompts": WAS_Text_Parse_NSP, |
|
"Save Text File": WAS_Text_Save, |
|
} |
|
|
|
print('\033[34mWAS Node Suite: \033[92mLoaded\033[0m') |