Add support for unCLIP SD2.x models.

See the _for_testing/unclip category in the UI for the new nodes.

unCLIPCheckpointLoader is used to load them.

unCLIPConditioning is used to add the image conditioning; it takes as input a
CLIPVisionEncode output (that node has been moved to the conditioning section).
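
For context, a minimal sketch of the intended wiring, calling the new node classes directly in Python instead of through the UI (the checkpoint filename, the prompt, and the zero image tensor are placeholder assumptions, not part of this commit):

    import torch
    import nodes

    # Load an unCLIP SD2.x checkpoint; unlike CheckpointLoaderSimple this also
    # returns the CLIP vision model bundled with the checkpoint.
    model, clip, vae, clip_vision = nodes.unCLIPCheckpointLoader().load_checkpoint("sd21-unclip-h.ckpt")

    # Encode a reference image (ComfyUI IMAGE tensors are [batch, height, width, channel] floats;
    # the zeros tensor is only a stand-in for a real loaded image).
    image = torch.zeros((1, 512, 512, 3))
    (clip_vision_output,) = nodes.CLIPVisionEncode().encode(clip_vision, image)

    # Mix the image embedding into the text conditioning.
    (cond,) = nodes.CLIPTextEncode().encode(clip, "a photo of a cat")
    (cond,) = nodes.unCLIPConditioning().apply_adm(cond, clip_vision_output, strength=1.0)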
Author: comfyanonymous
Date:   2023-04-01 23:19:15 -04:00
Parent: 0d972b85e6
Commit: 809bcc8ceb

17 changed files with 593 additions and 113 deletions

nodes.py

@@ -18,7 +18,7 @@ import comfy.samplers
 import comfy.sd
 import comfy.utils
-import comfy_extras.clip_vision
+import comfy.clip_vision
 import model_management
 import importlib
@@ -219,6 +219,21 @@ class CheckpointLoaderSimple:
         out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
         return out
 
+class unCLIPCheckpointLoader:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
+                             }}
+    RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION")
+    FUNCTION = "load_checkpoint"
+
+    CATEGORY = "_for_testing/unclip"
+
+    def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
+        ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
+        out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
+        return out
+
 class CLIPSetLastLayer:
     @classmethod
     def INPUT_TYPES(s):
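
Aside from the extra output_clipvision=True flag, this mirrors CheckpointLoaderSimple; the only caller-visible difference is the fourth CLIP_VISION output slot. A hypothetical unpack (the checkpoint name is a placeholder):

    # Returns a 4-tuple instead of CheckpointLoaderSimple's 3-tuple.
    model, clip, vae, clip_vision = unCLIPCheckpointLoader().load_checkpoint("sd21-unclip-h.ckpt")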
@@ -370,7 +385,7 @@ class CLIPVisionLoader:
     def load_clip(self, clip_name):
         clip_path = folder_paths.get_full_path("clip_vision", clip_name)
-        clip_vision = comfy_extras.clip_vision.load(clip_path)
+        clip_vision = comfy.clip_vision.load(clip_path)
         return (clip_vision,)
 
 class CLIPVisionEncode:
@@ -382,7 +397,7 @@ class CLIPVisionEncode:
     RETURN_TYPES = ("CLIP_VISION_OUTPUT",)
     FUNCTION = "encode"
 
-    CATEGORY = "conditioning/style_model"
+    CATEGORY = "conditioning"
 
     def encode(self, clip_vision, image):
         output = clip_vision.encode_image(image)
@@ -424,6 +439,32 @@ class StyleModelApply:
             c.append(n)
         return (c, )
 
+class unCLIPConditioning:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"conditioning": ("CONDITIONING", ),
+                             "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
+                             "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+                             }}
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "apply_adm"
+
+    CATEGORY = "_for_testing/unclip"
+
+    def apply_adm(self, conditioning, clip_vision_output, strength):
+        c = []
+        for t in conditioning:
+            o = t[1].copy()
+            x = (clip_vision_output, strength)
+            if "adm" in o:
+                o["adm"] = o["adm"][:] + [x]
+            else:
+                o["adm"] = [x]
+            n = [t[0], o]
+            c.append(n)
+        return (c, )
+
 class EmptyLatentImage:
     def __init__(self, device="cpu"):
         self.device = device
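
Note that apply_adm leaves the text embedding (t[0]) untouched and only appends a (clip_vision_output, strength) tuple to an "adm" list in a copy of each conditioning dict, so several unCLIPConditioning nodes can be chained to stack multiple image conditionings. A minimal sketch of the transformation (the zero tensor and plain object are stand-ins for a real text embedding and CLIP_VISION_OUTPUT):

    import torch

    cond = [[torch.zeros(1, 77, 1024), {}]]   # shape of a CLIPTextEncode output for SD2.x
    vision_out = object()                     # stand-in for a CLIP_VISION_OUTPUT

    node = unCLIPConditioning()
    (cond,) = node.apply_adm(cond, vision_out, strength=1.0)
    (cond,) = node.apply_adm(cond, vision_out, strength=0.5)

    print(cond[0][1]["adm"])  # [(vision_out, 1.0), (vision_out, 0.5)] -- two stacked image conds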
@@ -1025,6 +1066,7 @@ NODE_CLASS_MAPPINGS = {
     "CLIPLoader": CLIPLoader,
     "CLIPVisionEncode": CLIPVisionEncode,
     "StyleModelApply": StyleModelApply,
+    "unCLIPConditioning": unCLIPConditioning,
     "ControlNetApply": ControlNetApply,
     "ControlNetLoader": ControlNetLoader,
     "DiffControlNetLoader": DiffControlNetLoader,
@@ -1033,6 +1075,7 @@ NODE_CLASS_MAPPINGS = {
     "VAEDecodeTiled": VAEDecodeTiled,
     "VAEEncodeTiled": VAEEncodeTiled,
     "TomePatchModel": TomePatchModel,
+    "unCLIPCheckpointLoader": unCLIPCheckpointLoader,
 }
 
 def load_custom_node(module_path):
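
As the trailing context hints, registration in NODE_CLASS_MAPPINGS is all it takes to expose a node to the UI; load_custom_node applies the same convention to external modules that export their own NODE_CLASS_MAPPINGS dict. A minimal sketch of a custom node following the pattern above (all names are hypothetical):

    # my_custom_node.py -- hypothetical module picked up by load_custom_node
    class UppercaseText:
        @classmethod
        def INPUT_TYPES(s):
            return {"required": {"text": ("STRING", {"default": ""})}}
        RETURN_TYPES = ("STRING",)
        FUNCTION = "run"
        CATEGORY = "_for_testing"

        def run(self, text):
            # Node functions return a tuple matching RETURN_TYPES.
            return (text.upper(),)

    NODE_CLASS_MAPPINGS = {"UppercaseText": UppercaseText}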