From 022a9f271b677291c4a4988397695bd3a91666b5 Mon Sep 17 00:00:00 2001 From: mligaintart <> Date: Wed, 5 Apr 2023 19:52:39 -0400 Subject: [PATCH 01/11] Adds masking to Latent Composite, and provides new masking utilities to allow better compositing. --- comfy_extras/nodes_mask.py | 237 +++++++++++++++++++++++++++++++++++++ nodes.py | 87 ++++++++------ 2 files changed, 291 insertions(+), 33 deletions(-) create mode 100644 comfy_extras/nodes_mask.py diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py new file mode 100644 index 000000000..ba39680a7 --- /dev/null +++ b/comfy_extras/nodes_mask.py @@ -0,0 +1,237 @@ +import torch + +from nodes import MAX_RESOLUTION + +class LatentCompositeMasked: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "destination": ("LATENT",), + "source": ("LATENT",), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + }, + "optional": { + "mask": ("MASK",), + } + } + RETURN_TYPES = ("LATENT",) + FUNCTION = "composite" + + CATEGORY = "latent" + + def composite(self, destination, source, x, y, mask = None): + output = destination.copy() + destination = destination["samples"].clone() + source = source["samples"] + + left, top = (x // 8, y // 8) + right, bottom = (left + source.shape[3], top + source.shape[2],) + + + if mask is None: + mask = torch.ones_like(source) + else: + mask = mask.clone() + mask = torch.nn.functional.interpolate(mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear") + mask = mask.repeat((source.shape[0], source.shape[1], 1, 1)) + + # calculate the bounds of the source that will be overlapping the destination + # this prevents the source trying to overwrite latent pixels that are out of bounds + # of the destination + visible_width, visible_height = (destination.shape[3] - left, destination.shape[2] - top,) + + mask = mask[:, :, :visible_height, :visible_width] + inverse_mask = torch.ones_like(mask) - mask + + source_portion = mask * source[:, :, :visible_height, :visible_width] + destination_portion = inverse_mask * destination[:, :, top:bottom, left:right] + + destination[:, :, top:bottom, left:right] = source_portion + destination_portion + + output["samples"] = destination + + return (output,) + +class MaskToImage: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK",), + } + } + + CATEGORY = "mask" + + RETURN_TYPES = ("IMAGE",) + + FUNCTION = "convert" + + def convert(self, mask): + image = torch.cat([torch.reshape(mask.clone(), [1, mask.shape[0], mask.shape[1], 1,])] * 3, 3) + + return (image,) + +class SolidMask: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + } + } + + CATEGORY = "mask" + + RETURN_TYPES = ("MASK",) + + FUNCTION = "solid" + + def solid(self, value, width, height): + out = torch.full((height, width), value, dtype=torch.float32, device="cpu") + return (out,) + +class InvertMask: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK",), + } + } + + CATEGORY = "mask" + + RETURN_TYPES = ("MASK",) + + FUNCTION = "invert" + + def invert(self, mask): + out = 1.0 - mask + return (out,) + +class CropMask: + @classmethod + def INPUT_TYPES(cls): + return { + 
"required": { + "mask": ("MASK",), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + } + } + + CATEGORY = "mask" + + RETURN_TYPES = ("MASK",) + + FUNCTION = "crop" + + def crop(self, mask, x, y, width, height): + out = mask[y:y + height, x:x + width] + return (out,) + +class MaskComposite: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "destination": ("MASK",), + "source": ("MASK",), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "operation": (["multiply", "add", "subtract"],), + } + } + + CATEGORY = "mask" + + RETURN_TYPES = ("MASK",) + + FUNCTION = "combine" + + def combine(self, destination, source, x, y, operation): + output = destination.clone() + + left, top = (x, y,) + right, bottom = (min(left + source.shape[1], destination.shape[1]), min(top + source.shape[0], destination.shape[0])) + visible_width, visible_height = (right - left, bottom - top,) + + source_portion = source[:visible_height, :visible_width] + destination_portion = destination[top:bottom, left:right] + + match operation: + case "multiply": + output[top:bottom, left:right] = destination_portion * source_portion + case "add": + output[top:bottom, left:right] = destination_portion + source_portion + case "subtract": + output[top:bottom, left:right] = destination_portion - source_portion + + output = torch.clamp(output, 0.0, 1.0) + + return (output,) + +class FeatherMask: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK",), + "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + } + } + + CATEGORY = "mask" + + RETURN_TYPES = ("MASK",) + + FUNCTION = "feather" + + def feather(self, mask, left, top, right, bottom): + output = mask.clone() + + left = min(left, output.shape[1]) + right = min(right, output.shape[1]) + top = min(top, output.shape[0]) + bottom = min(bottom, output.shape[0]) + + for x in range(left): + feather_rate = (x + 1.0) / left + output[:, x] *= feather_rate + + for x in range(right): + feather_rate = (x + 1) / right + output[:, -x] *= feather_rate + + for y in range(top): + feather_rate = (y + 1) / top + output[y, :] *= feather_rate + + for y in range(bottom): + feather_rate = (y + 1) / bottom + output[-y, :] *= feather_rate + + return (output,) + + + +NODE_CLASS_MAPPINGS = { + "LatentCompositeMasked": LatentCompositeMasked, + "MaskToImage": MaskToImage, + "SolidMask": SolidMask, + "InvertMask": InvertMask, + "CropMask": CropMask, + "MaskComposite": MaskComposite, + "FeatherMask": FeatherMask, +} + diff --git a/nodes.py b/nodes.py index 187d54a11..eac232d5f 100644 --- a/nodes.py +++ b/nodes.py @@ -553,44 +553,64 @@ class LatentFlip: class LatentComposite: @classmethod def INPUT_TYPES(s): - return {"required": { "samples_to": ("LATENT",), - "samples_from": ("LATENT",), - "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - 
"feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - }} + return { + "required": { + "samples_to": ("LATENT",), + "samples_from": ("LATENT",), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + } + } RETURN_TYPES = ("LATENT",) FUNCTION = "composite" CATEGORY = "latent" - def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0): - x = x // 8 - y = y // 8 - feather = feather // 8 - samples_out = samples_to.copy() - s = samples_to["samples"].clone() - samples_to = samples_to["samples"] - samples_from = samples_from["samples"] - if feather == 0: - s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] - else: - samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] - mask = torch.ones_like(samples_from) - for t in range(feather): - if y != 0: - mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1)) + def composite(self, samples_to, samples_from, x, y, feather): + output = samples_to.copy() + destination = samples_to["samples"].clone() + source = samples_from["samples"] - if y + samples_from.shape[2] < samples_to.shape[2]: - mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1)) - if x != 0: - mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1)) - if x + samples_from.shape[3] < samples_to.shape[3]: - mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1)) - rev_mask = torch.ones_like(mask) - mask - s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask - samples_out["samples"] = s - return (samples_out,) + left, top = (x // 8, y // 8) + right, bottom = (left + source.shape[3], top + source.shape[2],) + feather = feather // 8 + + + + # calculate the bounds of the source that will be overlapping the destination + # this prevents the source trying to overwrite latent pixels that are out of bounds + # of the destination + visible_width, visible_height = (destination.shape[3] - left, destination.shape[2] - top,) + + mask = torch.ones_like(source) + + for f in range(feather): + feather_rate = (f + 1.0) / feather + + if left > 0: + mask[:, :, :, f] *= feather_rate + + if right < destination.shape[3] - 1: + mask[:, :, :, -f] *= feather_rate + + if top > 0: + mask[:, :, f, :] *= feather_rate + + if bottom < destination.shape[2] - 1: + mask[:, :, -f, :] *= feather_rate + + mask = mask[:, :, :visible_height, :visible_width] + inverse_mask = torch.ones_like(mask) - mask + + source_portion = mask * source[:, :, :visible_height, :visible_width] + destination_portion = inverse_mask * destination[:, :, top:bottom, left:right] + + destination[:, :, top:bottom, left:right] = source_portion + destination_portion + + output["samples"] = destination + + return (output,) class LatentCrop: @classmethod @@ -907,7 +927,7 @@ class LoadImageMask: "channel": (["alpha", "red", "green", "blue"], ),} } - CATEGORY = "image" + CATEGORY = "mask" RETURN_TYPES = ("MASK",) FUNCTION = "load_image" @@ -1114,3 +1134,4 @@ def init_custom_nodes(): load_custom_nodes() load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py")) 
    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
+    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))

From 2dc7257e292cad08876e7f188e2fbb2f2abb6644 Mon Sep 17 00:00:00 2001
From: omar92
Date: Sat, 8 Apr 2023 18:58:47 +0200
Subject: [PATCH 02/11] Allow connecting a Primitive node to ComfyUI nodes
 that have the forceInput setting

---
 web/extensions/core/widgetInputs.js | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js
index 865af7763..f4d2d22de 100644
--- a/web/extensions/core/widgetInputs.js
+++ b/web/extensions/core/widgetInputs.js
@@ -233,7 +233,9 @@ app.registerExtension({
 			// Fires before the link is made allowing us to reject it if it isn't valid

 			// No widget, we cant connect
-			if (!input.widget) return false;
+			if (!input.widget) {
+				if (!(input.type in ComfyWidgets)) return false;
+			}

 			if (this.outputs[slot].links?.length) {
 				return this.#isValidConnection(input);
@@ -252,9 +254,18 @@
 			const input = theirNode.inputs[link.target_slot];
 			if (!input) return;

-			const widget = input.widget;
-			const { type, linkType } = getWidgetType(widget.config);
+			var _widget;
+			if (!input.widget) {
+				if (!(input.type in ComfyWidgets)) return;
+				_widget = { "name": input.name, "config": [input.type, {}] }//fake widget
+			} else {
+				_widget = input.widget;
+			}
+
+			const widget = _widget;
+			const { type, linkType } = getWidgetType(widget.config);
+			console.log({ "input": input });

 			// Update our output to restrict to the widget type
 			this.outputs[0].type = linkType;
 			this.outputs[0].name = type;
@@ -274,7 +285,7 @@
 			if (type in ComfyWidgets) {
 				widget = (ComfyWidgets[type](this, "value", inputData, app) || {}).widget;
 			} else {
-				widget = this.addWidget(type, "value", null, () => {}, {});
+				widget = this.addWidget(type, "value", null, () => { }, {});
 			}

 			if (node?.widgets && widget) {

From 9d095c52f3d9fc65477abae380cf8ba6d8b271dd Mon Sep 17 00:00:00 2001
From: omar92
Date: Sat, 8 Apr 2023 19:05:22 +0200
Subject: [PATCH 03/11] Handle double-click to create a Primitive widget

---
 web/extensions/core/widgetInputs.js | 43 +++++++++++++++--------------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js
index f4d2d22de..28c5aee1d 100644
--- a/web/extensions/core/widgetInputs.js
+++ b/web/extensions/core/widgetInputs.js
@@ -159,27 +159,31 @@ app.registerExtension({
 			const r = origOnInputDblClick ?
origOnInputDblClick.apply(this, arguments) : undefined; const input = this.inputs[slot]; - if (input.widget && !input[ignoreDblClick]) { - const node = LiteGraph.createNode("PrimitiveNode"); - app.graph.add(node); - - // Calculate a position that wont directly overlap another node - const pos = [this.pos[0] - node.size[0] - 30, this.pos[1]]; - while (isNodeAtPos(pos)) { - pos[1] += LiteGraph.NODE_TITLE_HEIGHT; - } - - node.pos = pos; - node.connect(0, this, slot); - node.title = input.name; - - // Prevent adding duplicates due to triple clicking - input[ignoreDblClick] = true; - setTimeout(() => { - delete input[ignoreDblClick]; - }, 300); + if (!input.widget || !input[ignoreDblClick])// Not a widget input or already handled input + { + if (!(input.type in ComfyWidgets)) return r;//also Not a ComfyWidgets input (do nothing) } + // Create a primitive node + const node = LiteGraph.createNode("PrimitiveNode"); + app.graph.add(node); + + // Calculate a position that wont directly overlap another node + const pos = [this.pos[0] - node.size[0] - 30, this.pos[1]]; + while (isNodeAtPos(pos)) { + pos[1] += LiteGraph.NODE_TITLE_HEIGHT; + } + + node.pos = pos; + node.connect(0, this, slot); + node.title = input.name; + + // Prevent adding duplicates due to triple clicking + input[ignoreDblClick] = true; + setTimeout(() => { + delete input[ignoreDblClick]; + }, 300); + return r; }; }, @@ -265,7 +269,6 @@ app.registerExtension({ const widget = _widget; const { type, linkType } = getWidgetType(widget.config); - console.log({ "input": input }); // Update our output to restrict to the widget type this.outputs[0].type = linkType; this.outputs[0].name = type; From e12fb88b1b84e354872c0d761544558479bcfad2 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 11 Apr 2023 16:49:39 -0600 Subject: [PATCH 04/11] Image/mask conversion nodes --- nodes.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/nodes.py b/nodes.py index 14a73bcd7..ecd931d69 100644 --- a/nodes.py +++ b/nodes.py @@ -1059,6 +1059,43 @@ class ImagePadForOutpaint: return (new_image, mask) +class ImageToMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "channel": (["red", "green", "blue"],), + } + } + + CATEGORY = "image" + + RETURN_TYPES = ("MASK",) + FUNCTION = "image_to_mask" + + def image_to_mask(self, image, channel): + channels = ["red", "green", "blue"] + mask = torch.select(image[0], 2, channels.index(channel)) + return (mask,) + +class MaskToImage: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + } + } + + CATEGORY = "image" + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "mask_to_image" + + def mask_to_image(self, mask): + result = mask[None, :, :, None].expand(-1, -1, -1, 3) + return (result,) NODE_CLASS_MAPPINGS = { "KSampler": KSampler, @@ -1102,6 +1139,8 @@ NODE_CLASS_MAPPINGS = { "unCLIPCheckpointLoader": unCLIPCheckpointLoader, "CheckpointLoader": CheckpointLoader, "DiffusersLoader": DiffusersLoader, + "ImageToMask": ImageToMask, + "MaskToImage": MaskToImage, } NODE_DISPLAY_NAME_MAPPINGS = { @@ -1147,6 +1186,8 @@ NODE_DISPLAY_NAME_MAPPINGS = { "ImageUpscaleWithModel": "Upscale Image (using Model)", "ImageInvert": "Invert Image", "ImagePadForOutpaint": "Pad Image for Outpainting", + "ImageToMask": "Convert Image to Mask", + "MaskToImage": "Convert Mask to Image", # _for_testing "VAEDecodeTiled": "VAE Decode (Tiled)", "VAEEncodeTiled": "VAE Encode (Tiled)", From e1d289c1ec6894e15af0b57b6630b853341c61fa Mon Sep 
17 00:00:00 2001 From: missionfloyd Date: Tue, 11 Apr 2023 20:26:24 -0600 Subject: [PATCH 05/11] use slice instead of torch.select() --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index ecd931d69..815631f58 100644 --- a/nodes.py +++ b/nodes.py @@ -1076,7 +1076,7 @@ class ImageToMask: def image_to_mask(self, image, channel): channels = ["red", "green", "blue"] - mask = torch.select(image[0], 2, channels.index(channel)) + mask = image[0, :, :, channels.index(channel)] return (mask,) class MaskToImage: From 9371924e654128258cc82419e83c2a788a32e2be Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 13 Apr 2023 03:11:17 -0600 Subject: [PATCH 06/11] Move mask conversion to separate file --- comfy_extras/nodes_mask_conversion.py | 54 +++++++++++++++++++++++++++ nodes.py | 42 +-------------------- 2 files changed, 55 insertions(+), 41 deletions(-) create mode 100644 comfy_extras/nodes_mask_conversion.py diff --git a/comfy_extras/nodes_mask_conversion.py b/comfy_extras/nodes_mask_conversion.py new file mode 100644 index 000000000..04dcbd0d9 --- /dev/null +++ b/comfy_extras/nodes_mask_conversion.py @@ -0,0 +1,54 @@ +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image + +import comfy.utils + +class ImageToMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "channel": (["red", "green", "blue"],), + } + } + + CATEGORY = "image" + + RETURN_TYPES = ("MASK",) + FUNCTION = "image_to_mask" + + def image_to_mask(self, image, channel): + channels = ["red", "green", "blue"] + mask = image[0, :, :, channels.index(channel)] + return (mask,) + +class MaskToImage: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + } + } + + CATEGORY = "image" + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "mask_to_image" + + def mask_to_image(self, mask): + result = mask[None, :, :, None].expand(-1, -1, -1, 3) + return (result,) + +NODE_CLASS_MAPPINGS = { + "ImageToMask": ImageToMask, + "MaskToImage": MaskToImage, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ImageToMask": "Convert Image to Mask", + "MaskToImage": "Convert Mask to Image", +} diff --git a/nodes.py b/nodes.py index 325e3ba68..3ed9cf499 100644 --- a/nodes.py +++ b/nodes.py @@ -1061,43 +1061,6 @@ class ImagePadForOutpaint: return (new_image, mask) -class ImageToMask: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "channel": (["red", "green", "blue"],), - } - } - - CATEGORY = "image" - - RETURN_TYPES = ("MASK",) - FUNCTION = "image_to_mask" - - def image_to_mask(self, image, channel): - channels = ["red", "green", "blue"] - mask = image[0, :, :, channels.index(channel)] - return (mask,) - -class MaskToImage: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask": ("MASK",), - } - } - - CATEGORY = "image" - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "mask_to_image" - - def mask_to_image(self, mask): - result = mask[None, :, :, None].expand(-1, -1, -1, 3) - return (result,) NODE_CLASS_MAPPINGS = { "KSampler": KSampler, @@ -1141,8 +1104,6 @@ NODE_CLASS_MAPPINGS = { "unCLIPCheckpointLoader": unCLIPCheckpointLoader, "CheckpointLoader": CheckpointLoader, "DiffusersLoader": DiffusersLoader, - "ImageToMask": ImageToMask, - "MaskToImage": MaskToImage, } NODE_DISPLAY_NAME_MAPPINGS = { @@ -1188,8 +1149,6 @@ NODE_DISPLAY_NAME_MAPPINGS = { "ImageUpscaleWithModel": "Upscale Image (using Model)", "ImageInvert": "Invert Image", "ImagePadForOutpaint": "Pad Image for 
Outpainting", - "ImageToMask": "Convert Image to Mask", - "MaskToImage": "Convert Mask to Image", # _for_testing "VAEDecodeTiled": "VAE Decode (Tiled)", "VAEEncodeTiled": "VAE Encode (Tiled)", @@ -1233,3 +1192,4 @@ def init_custom_nodes(): load_custom_nodes() load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py")) + load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask_conversion.py")) From d2337a86fe6fb97ed9d818635083fcf1dc2bafc0 Mon Sep 17 00:00:00 2001 From: Gavroche CryptoRUSH <95258328+CryptoRUSHGav@users.noreply.github.com> Date: Thu, 13 Apr 2023 16:38:02 -0400 Subject: [PATCH 07/11] remove extra semi-colon --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index 946c66857..f13626771 100644 --- a/nodes.py +++ b/nodes.py @@ -871,7 +871,7 @@ class SaveImage: "filename": file, "subfolder": subfolder, "type": self.type - }); + }) counter += 1 return { "ui": { "images": results } } From 35a2c790b60f836371f8955c96661e929712619e Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Fri, 14 Apr 2023 00:12:15 -0400 Subject: [PATCH 08/11] Update comfy_extras/nodes_mask.py Co-authored-by: missionfloyd --- comfy_extras/nodes_mask.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index ba39680a7..ab17fc509 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -9,8 +9,8 @@ class LatentCompositeMasked: "required": { "destination": ("LATENT",), "source": ("LATENT",), - "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "x": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 8}), + "y": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 8}), }, "optional": { "mask": ("MASK",), @@ -26,6 +26,9 @@ class LatentCompositeMasked: destination = destination["samples"].clone() source = source["samples"] + x = max(-source.shape[3] * 8, min(x, destination.shape[3] * 8)) + y = max(-source.shape[2] * 8, min(y, destination.shape[2] * 8)) + left, top = (x // 8, y // 8) right, bottom = (left + source.shape[3], top + source.shape[2],) @@ -40,7 +43,7 @@ class LatentCompositeMasked: # calculate the bounds of the source that will be overlapping the destination # this prevents the source trying to overwrite latent pixels that are out of bounds # of the destination - visible_width, visible_height = (destination.shape[3] - left, destination.shape[2] - top,) + visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),) mask = mask[:, :, :visible_height, :visible_width] inverse_mask = torch.ones_like(mask) - mask From 1a7cda715b3c01ef89b16c5cc96784ca4efa313c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 14 Apr 2023 00:14:35 -0400 Subject: [PATCH 09/11] Revert LatentComposite. 
--- nodes.py | 82 +++++++++++++++++++++----------------------------------- 1 file changed, 31 insertions(+), 51 deletions(-) diff --git a/nodes.py b/nodes.py index 661f879ac..6468ac6b8 100644 --- a/nodes.py +++ b/nodes.py @@ -578,64 +578,44 @@ class LatentFlip: class LatentComposite: @classmethod def INPUT_TYPES(s): - return { - "required": { - "samples_to": ("LATENT",), - "samples_from": ("LATENT",), - "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - } - } + return {"required": { "samples_to": ("LATENT",), + "samples_from": ("LATENT",), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "feather": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + }} RETURN_TYPES = ("LATENT",) FUNCTION = "composite" CATEGORY = "latent" - def composite(self, samples_to, samples_from, x, y, feather): - output = samples_to.copy() - destination = samples_to["samples"].clone() - source = samples_from["samples"] - - left, top = (x // 8, y // 8) - right, bottom = (left + source.shape[3], top + source.shape[2],) + def composite(self, samples_to, samples_from, x, y, composite_method="normal", feather=0): + x = x // 8 + y = y // 8 feather = feather // 8 + samples_out = samples_to.copy() + s = samples_to["samples"].clone() + samples_to = samples_to["samples"] + samples_from = samples_from["samples"] + if feather == 0: + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] + else: + samples_from = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] + mask = torch.ones_like(samples_from) + for t in range(feather): + if y != 0: + mask[:,:,t:1+t,:] *= ((1.0/feather) * (t + 1)) - - - # calculate the bounds of the source that will be overlapping the destination - # this prevents the source trying to overwrite latent pixels that are out of bounds - # of the destination - visible_width, visible_height = (destination.shape[3] - left, destination.shape[2] - top,) - - mask = torch.ones_like(source) - - for f in range(feather): - feather_rate = (f + 1.0) / feather - - if left > 0: - mask[:, :, :, f] *= feather_rate - - if right < destination.shape[3] - 1: - mask[:, :, :, -f] *= feather_rate - - if top > 0: - mask[:, :, f, :] *= feather_rate - - if bottom < destination.shape[2] - 1: - mask[:, :, -f, :] *= feather_rate - - mask = mask[:, :, :visible_height, :visible_width] - inverse_mask = torch.ones_like(mask) - mask - - source_portion = mask * source[:, :, :visible_height, :visible_width] - destination_portion = inverse_mask * destination[:, :, top:bottom, left:right] - - destination[:, :, top:bottom, left:right] = source_portion + destination_portion - - output["samples"] = destination - - return (output,) + if y + samples_from.shape[2] < samples_to.shape[2]: + mask[:,:,mask.shape[2] -1 -t: mask.shape[2]-t,:] *= ((1.0/feather) * (t + 1)) + if x != 0: + mask[:,:,:,t:1+t] *= ((1.0/feather) * (t + 1)) + if x + samples_from.shape[3] < samples_to.shape[3]: + mask[:,:,:,mask.shape[3]- 1 - t: mask.shape[3]- t] *= ((1.0/feather) * (t + 1)) + rev_mask = torch.ones_like(mask) - mask + s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] = samples_from[:,:,:samples_to.shape[2] - y, :samples_to.shape[3] - x] * mask + 
s[:,:,y:y+samples_from.shape[2],x:x+samples_from.shape[3]] * rev_mask + samples_out["samples"] = s + return (samples_out,) class LatentCrop: @classmethod From f48f0872e2310b1650f798d02e94264cc06afd69 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 14 Apr 2023 00:21:01 -0400 Subject: [PATCH 10/11] Refactor: move nodes_mask_convertion nodes to nodes_mask. --- comfy_extras/nodes_mask.py | 39 +++++++++++++++---- comfy_extras/nodes_mask_conversion.py | 54 --------------------------- nodes.py | 1 - 3 files changed, 31 insertions(+), 63 deletions(-) delete mode 100644 comfy_extras/nodes_mask_conversion.py diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index ab17fc509..60feea0db 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -59,23 +59,41 @@ class LatentCompositeMasked: class MaskToImage: @classmethod - def INPUT_TYPES(cls): + def INPUT_TYPES(s): return { - "required": { - "mask": ("MASK",), - } + "required": { + "mask": ("MASK",), + } } CATEGORY = "mask" RETURN_TYPES = ("IMAGE",) + FUNCTION = "mask_to_image" - FUNCTION = "convert" + def mask_to_image(self, mask): + result = mask[None, :, :, None].expand(-1, -1, -1, 3) + return (result,) - def convert(self, mask): - image = torch.cat([torch.reshape(mask.clone(), [1, mask.shape[0], mask.shape[1], 1,])] * 3, 3) +class ImageToMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "channel": (["red", "green", "blue"],), + } + } - return (image,) + CATEGORY = "mask" + + RETURN_TYPES = ("MASK",) + FUNCTION = "image_to_mask" + + def image_to_mask(self, image, channel): + channels = ["red", "green", "blue"] + mask = image[0, :, :, channels.index(channel)] + return (mask,) class SolidMask: @classmethod @@ -231,6 +249,7 @@ class FeatherMask: NODE_CLASS_MAPPINGS = { "LatentCompositeMasked": LatentCompositeMasked, "MaskToImage": MaskToImage, + "ImageToMask": ImageToMask, "SolidMask": SolidMask, "InvertMask": InvertMask, "CropMask": CropMask, @@ -238,3 +257,7 @@ NODE_CLASS_MAPPINGS = { "FeatherMask": FeatherMask, } +NODE_DISPLAY_NAME_MAPPINGS = { + "ImageToMask": "Convert Image to Mask", + "MaskToImage": "Convert Mask to Image", +} diff --git a/comfy_extras/nodes_mask_conversion.py b/comfy_extras/nodes_mask_conversion.py deleted file mode 100644 index 04dcbd0d9..000000000 --- a/comfy_extras/nodes_mask_conversion.py +++ /dev/null @@ -1,54 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F -from PIL import Image - -import comfy.utils - -class ImageToMask: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "channel": (["red", "green", "blue"],), - } - } - - CATEGORY = "image" - - RETURN_TYPES = ("MASK",) - FUNCTION = "image_to_mask" - - def image_to_mask(self, image, channel): - channels = ["red", "green", "blue"] - mask = image[0, :, :, channels.index(channel)] - return (mask,) - -class MaskToImage: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask": ("MASK",), - } - } - - CATEGORY = "image" - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "mask_to_image" - - def mask_to_image(self, mask): - result = mask[None, :, :, None].expand(-1, -1, -1, 3) - return (result,) - -NODE_CLASS_MAPPINGS = { - "ImageToMask": ImageToMask, - "MaskToImage": MaskToImage, -} - -NODE_DISPLAY_NAME_MAPPINGS = { - "ImageToMask": "Convert Image to Mask", - "MaskToImage": "Convert Mask to Image", -} diff --git a/nodes.py b/nodes.py index aff03dd43..6468ac6b8 100644 --- a/nodes.py +++ b/nodes.py @@ -1193,4 +1193,3 @@ 
def init_custom_nodes(): load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask_conversion.py")) From d98a4de9eb6b676bfe9c172e7310934148e16dd2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 14 Apr 2023 00:49:19 -0400 Subject: [PATCH 11/11] LatentCompositeMasked: negative x, y don't work. --- comfy_extras/nodes_mask.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 60feea0db..4dfb0b93e 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -9,8 +9,8 @@ class LatentCompositeMasked: "required": { "destination": ("LATENT",), "source": ("LATENT",), - "x": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 8}), - "y": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 8}), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), }, "optional": { "mask": ("MASK",),
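
Taken together, patches 01, 08, 09 and 11 leave LatentCompositeMasked computing a per-pixel lerp between the destination and source latents inside the pasted region, with the mask rescaled to the source's latent resolution and the paste clipped to the destination's bounds. The sketch below is a standalone illustration of that math for the non-negative x/y case that patch 11 settles on; it is not ComfyUI source, and the function name and test tensors are ours.

import torch

def composite_masked(destination, source, x, y, mask=None):
    """destination, source: latents shaped [B, C, H, W]; x, y: pixel offsets (multiples of 8)."""
    destination = destination.clone()
    left, top = x // 8, y // 8
    right, bottom = left + source.shape[3], top + source.shape[2]

    if mask is None:
        mask = torch.ones_like(source)
    else:
        # Scale a [H, W] mask to the source's latent size, then broadcast it
        # over batch and channels, as the node does with interpolate + repeat.
        mask = torch.nn.functional.interpolate(
            mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear"
        ).repeat(source.shape[0], source.shape[1], 1, 1)

    # Clip to the part of the source that actually lands inside the destination,
    # so an off-canvas paste cannot write out of bounds.
    visible_h = destination.shape[2] - top
    visible_w = destination.shape[3] - left
    mask = mask[:, :, :visible_h, :visible_w]
    src = source[:, :, :visible_h, :visible_w]
    dst = destination[:, :, top:bottom, left:right]

    destination[:, :, top:bottom, left:right] = mask * src + (1.0 - mask) * dst
    return destination

# Example: paste a 32x32-latent patch at pixel offset (64, 64) under a gradient mask.
dest = torch.zeros(1, 4, 64, 64)
src = torch.ones(1, 4, 32, 32)
soft = torch.linspace(0, 1, 256)[:, None].repeat(1, 256)  # vertical ramp, [256, 256]
out = composite_masked(dest, src, 64, 64, soft)

Interpolating the mask to the source's latent size is what lets one MASK drive sources of any resolution; the repeat over batch and channel only broadcasts the same weights.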
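FeatherMask, and the feathered branch of the reverted LatentComposite, both build their soft edge the same way: each of the outermost N rows or columns on a requested side is scaled by a linear ramp (i + 1) / N, so weights fade toward zero at the border. Below is a minimal sketch of that ramp on a bare [H, W] mask; the end-relative indices are our own, written out explicitly so the first iteration always lands on the outermost row or column, rather than copied verbatim from the patch.

import torch

def feather(mask, left=0, top=0, right=0, bottom=0):
    # Linearly ramp the mask toward 0 over a border of the given width per edge.
    out = mask.clone()
    h, w = out.shape
    for i in range(left):
        out[:, i] *= (i + 1) / left            # column 0 gets the smallest weight
    for i in range(right):
        out[:, w - 1 - i] *= (i + 1) / right   # column w-1 gets the smallest weight
    for i in range(top):
        out[i, :] *= (i + 1) / top
    for i in range(bottom):
        out[h - 1 - i, :] *= (i + 1) / bottom
    return out

m = feather(torch.ones(64, 64), left=8, top=8, right=8, bottom=8)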
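The conversion nodes that patches 04 through 06 (and the patch 10 refactor) move around rest on two tensor conventions: an IMAGE is a [B, H, W, C] float tensor and a MASK is a bare [H, W] float tensor. A short round trip under those conventions, mirroring the final slice-and-expand implementations:

import torch

image = torch.rand(1, 64, 64, 3)                    # IMAGE: [B, H, W, C]
channels = ["red", "green", "blue"]

mask = image[0, :, :, channels.index("red")]        # ImageToMask -> [H, W]
rgb = mask[None, :, :, None].expand(-1, -1, -1, 3)  # MaskToImage -> [1, H, W, 3]

assert torch.equal(rgb[0, :, :, 0], mask)           # channel survives the round trip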