From 991de5fc81eb867d3c5b28ec9a46ffd9da4221f2 Mon Sep 17 00:00:00 2001 From: bigcat88 Date: Thu, 24 Jul 2025 10:18:55 +0300 Subject: [PATCH 1/7] converted nodes files starting with "l" letter --- comfy_api/v3/io.py | 27 +++- comfy_api/v3/ui.py | 2 +- comfy_extras/v3/nodes_ip2p.py | 56 ++++++++ comfy_extras/v3/nodes_load_3d.py | 180 ++++++++++++++++++++++++++ comfy_extras/v3/nodes_lora_extract.py | 138 ++++++++++++++++++++ comfy_extras/v3/nodes_lotus.py | 34 +++++ comfy_extras/v3/nodes_lumina2.py | 116 +++++++++++++++++ nodes.py | 5 + 8 files changed, 556 insertions(+), 2 deletions(-) create mode 100644 comfy_extras/v3/nodes_ip2p.py create mode 100644 comfy_extras/v3/nodes_load_3d.py create mode 100644 comfy_extras/v3/nodes_lora_extract.py create mode 100644 comfy_extras/v3/nodes_lotus.py create mode 100644 comfy_extras/v3/nodes_lumina2.py diff --git a/comfy_api/v3/io.py b/comfy_api/v3/io.py index a045123c7..d1fd22deb 100644 --- a/comfy_api/v3/io.py +++ b/comfy_api/v3/io.py @@ -657,9 +657,34 @@ class Accumulation(ComfyTypeIO): accum: list[Any] Type = AccumulationDict + @comfytype(io_type="LOAD3D_CAMERA") class Load3DCamera(ComfyTypeIO): - Type = Any # TODO: figure out type for this; in code, only described as image['camera_info'], gotten from a LOAD_3D or LOAD_3D_ANIMATION type + class CameraInfo(TypedDict): + position: dict[str, float | int] + target: dict[str, float | int] + zoom: int + cameraType: str + + Type = CameraInfo + + +@comfytype(io_type="LOAD_3D") +class Load3D(ComfyTypeIO): + """3D models are stored as a dictionary.""" + class Model3DDict(TypedDict): + image: str + mask: str + normal: str + camera_info: Load3DCamera.CameraInfo + recording: NotRequired[str] + + Type = Model3DDict + + +@comfytype(io_type="LOAD_3D_ANIMATION") +class Load3DAnimation(Load3D): + ... 
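For illustration, the Load3DCamera.CameraInfo and Load3D.Model3DDict TypedDicts added above describe widget values shaped roughly like the sketch below; the field values are hypothetical examples, only the keys come from the definitions in this hunk:

    # hypothetical example of a Load3D.Model3DDict value (keys per the TypedDicts above)
    {"image": "3d/example.png", "mask": "3d/example_mask.png", "normal": "3d/example_normal.png",
     "camera_info": {"position": {"x": 0.0, "y": 1.0, "z": 5.0},
                     "target": {"x": 0.0, "y": 0.0, "z": 0.0},
                     "zoom": 1, "cameraType": "perspective"},
     "recording": ""}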
@comfytype(io_type="PHOTOMAKER") diff --git a/comfy_api/v3/ui.py b/comfy_api/v3/ui.py index d41d758d6..644bc4ca3 100644 --- a/comfy_api/v3/ui.py +++ b/comfy_api/v3/ui.py @@ -479,7 +479,7 @@ class PreviewUI3D(_UIOutput): self.values = values def as_dict(self): - return {"3d": self.values} + return {"result": self.values} class PreviewText(_UIOutput): diff --git a/comfy_extras/v3/nodes_ip2p.py b/comfy_extras/v3/nodes_ip2p.py new file mode 100644 index 000000000..79d9c2697 --- /dev/null +++ b/comfy_extras/v3/nodes_ip2p.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +import torch + +from comfy_api.v3 import io + + +class InstructPixToPixConditioning(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="InstructPixToPixConditioning_V3", + category="conditioning/instructpix2pix", + inputs=[ + io.Conditioning.Input(id="positive"), + io.Conditioning.Input(id="negative"), + io.Vae.Input(id="vae"), + io.Image.Input(id="pixels"), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, positive, negative, pixels, vae): + x = (pixels.shape[1] // 8) * 8 + y = (pixels.shape[2] // 8) * 8 + + if pixels.shape[1] != x or pixels.shape[2] != y: + x_offset = (pixels.shape[1] % 8) // 2 + y_offset = (pixels.shape[2] % 8) // 2 + pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:] + + concat_latent = vae.encode(pixels) + + out_latent = {} + out_latent["samples"] = torch.zeros_like(concat_latent) + + out = [] + for conditioning in [positive, negative]: + c = [] + for t in conditioning: + d = t[1].copy() + d["concat_latent_image"] = concat_latent + n = [t[0], d] + c.append(n) + out.append(c) + return io.NodeOutput(out[0], out[1], out_latent) + + +NODES_LIST = [ + InstructPixToPixConditioning, +] diff --git a/comfy_extras/v3/nodes_load_3d.py b/comfy_extras/v3/nodes_load_3d.py new file mode 100644 index 000000000..e7a82ccf3 --- /dev/null +++ b/comfy_extras/v3/nodes_load_3d.py @@ -0,0 +1,180 @@ +from __future__ import annotations + +import os +from pathlib import Path + +import folder_paths +import nodes +from comfy_api.input_impl import VideoFromFile +from comfy_api.v3 import io, ui + + +def normalize_path(path): + return path.replace("\\", "/") + + +class Load3D(io.ComfyNode): + @classmethod + def define_schema(cls): + input_dir = os.path.join(folder_paths.get_input_directory(), "3d") + + os.makedirs(input_dir, exist_ok=True) + + input_path = Path(input_dir) + base_path = Path(folder_paths.get_input_directory()) + + files = [ + normalize_path(str(file_path.relative_to(base_path))) + for file_path in input_path.rglob("*") + if file_path.suffix.lower() in {".gltf", ".glb", ".obj", ".fbx", ".stl"} + ] + + return io.Schema( + node_id="Load3D_V3", + display_name="Load 3D _V3", + category="3d", + is_experimental=True, + inputs=[ + io.Combo.Input(id="model_file", options=sorted(files), upload=io.UploadType.model), + io.Load3D.Input(id="image"), + io.Int.Input(id="width", default=1024, min=1, max=4096, step=1), + io.Int.Input(id="height", default=1024, min=1, max=4096, step=1), + ], + outputs=[ + io.Image.Output(display_name="image"), + io.Mask.Output(display_name="mask"), + io.String.Output(display_name="mesh_path"), + io.Image.Output(display_name="normal"), + io.Image.Output(display_name="lineart"), + io.Load3DCamera.Output(display_name="camera_info"), + io.Video.Output(display_name="recording_video"), + ], + ) + + 
@classmethod + def execute(cls, model_file, image, **kwargs): + image_path = folder_paths.get_annotated_filepath(image["image"]) + mask_path = folder_paths.get_annotated_filepath(image["mask"]) + normal_path = folder_paths.get_annotated_filepath(image["normal"]) + lineart_path = folder_paths.get_annotated_filepath(image["lineart"]) + + load_image_node = nodes.LoadImage() + output_image, ignore_mask = load_image_node.load_image(image=image_path) + ignore_image, output_mask = load_image_node.load_image(image=mask_path) + normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path) + lineart_image, ignore_mask3 = load_image_node.load_image(image=lineart_path) + + video = None + if image["recording"] != "": + recording_video_path = folder_paths.get_annotated_filepath(image["recording"]) + video = VideoFromFile(recording_video_path) + + return io.NodeOutput( + output_image, output_mask, model_file, normal_image, lineart_image, image["camera_info"], video + ) + + +class Load3DAnimation(io.ComfyNode): + @classmethod + def define_schema(cls): + input_dir = os.path.join(folder_paths.get_input_directory(), "3d") + + os.makedirs(input_dir, exist_ok=True) + + input_path = Path(input_dir) + base_path = Path(folder_paths.get_input_directory()) + + files = [ + normalize_path(str(file_path.relative_to(base_path))) + for file_path in input_path.rglob("*") + if file_path.suffix.lower() in {".gltf", ".glb", ".fbx"} + ] + + return io.Schema( + node_id="Load3DAnimation_V3", + display_name="Load 3D - Animation _V3", + category="3d", + is_experimental=True, + inputs=[ + io.Combo.Input(id="model_file", options=sorted(files), upload=io.UploadType.model), + io.Load3DAnimation.Input(id="image"), + io.Int.Input(id="width", default=1024, min=1, max=4096, step=1), + io.Int.Input(id="height", default=1024, min=1, max=4096, step=1), + ], + outputs=[ + io.Image.Output(display_name="image"), + io.Mask.Output(display_name="mask"), + io.String.Output(display_name="mesh_path"), + io.Image.Output(display_name="normal"), + io.Load3DCamera.Output(display_name="camera_info"), + io.Video.Output(display_name="recording_video"), + ], + ) + + @classmethod + def execute(cls, model_file, image, **kwargs): + image_path = folder_paths.get_annotated_filepath(image["image"]) + mask_path = folder_paths.get_annotated_filepath(image["mask"]) + normal_path = folder_paths.get_annotated_filepath(image["normal"]) + + load_image_node = nodes.LoadImage() + output_image, ignore_mask = load_image_node.load_image(image=image_path) + ignore_image, output_mask = load_image_node.load_image(image=mask_path) + normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path) + + video = None + if image['recording'] != "": + recording_video_path = folder_paths.get_annotated_filepath(image["recording"]) + video = VideoFromFile(recording_video_path) + + return io.NodeOutput(output_image, output_mask, model_file, normal_image, image["camera_info"], video) + + +class Preview3D(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="Preview3D_V3", # frontend expects "Preview3D" to work + display_name="Preview 3D _V3", + category="3d", + is_experimental=True, + is_output_node=True, + inputs=[ + io.String.Input(id="model_file", default="", multiline=False), + io.Load3DCamera.Input(id="camera_info", optional=True), + ], + outputs=[], + ) + + @classmethod + def execute(cls, model_file, camera_info=None): + return io.NodeOutput(ui=ui.PreviewUI3D([model_file, camera_info], cls=cls)) + + +class 
Preview3DAnimation(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="Preview3DAnimation_V3", # frontend expects "Preview3DAnimation" to work + display_name="Preview 3D - Animation _V3", + category="3d", + is_experimental=True, + is_output_node=True, + inputs=[ + io.String.Input(id="model_file", default="", multiline=False), + io.Load3DCamera.Input(id="camera_info", optional=True), + ], + outputs=[], + ) + + @classmethod + def execute(cls, model_file, camera_info=None): + return io.NodeOutput(ui=ui.PreviewUI3D([model_file, camera_info], cls=cls)) + + +NODES_LIST = [ + Load3D, + Load3DAnimation, + Preview3D, + Preview3DAnimation, +] diff --git a/comfy_extras/v3/nodes_lora_extract.py b/comfy_extras/v3/nodes_lora_extract.py new file mode 100644 index 000000000..180b62007 --- /dev/null +++ b/comfy_extras/v3/nodes_lora_extract.py @@ -0,0 +1,138 @@ +from __future__ import annotations + +import logging +import os +from enum import Enum + +import torch + +import comfy.model_management +import comfy.utils +import folder_paths +from comfy_api.v3 import io + +CLAMP_QUANTILE = 0.99 + + +def extract_lora(diff, rank): + conv2d = (len(diff.shape) == 4) + kernel_size = None if not conv2d else diff.size()[2:4] + conv2d_3x3 = conv2d and kernel_size != (1, 1) + out_dim, in_dim = diff.size()[0:2] + rank = min(rank, in_dim, out_dim) + + if conv2d: + if conv2d_3x3: + diff = diff.flatten(start_dim=1) + else: + diff = diff.squeeze() + + U, S, Vh = torch.linalg.svd(diff.float()) + U = U[:, :rank] + S = S[:rank] + U = U @ torch.diag(S) + Vh = Vh[:rank, :] + + dist = torch.cat([U.flatten(), Vh.flatten()]) + hi_val = torch.quantile(dist, CLAMP_QUANTILE) + low_val = -hi_val + + U = U.clamp(low_val, hi_val) + Vh = Vh.clamp(low_val, hi_val) + if conv2d: + U = U.reshape(out_dim, rank, 1, 1) + Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1]) + return (U, Vh) + + +class LORAType(Enum): + STANDARD = 0 + FULL_DIFF = 1 + + +LORA_TYPES = { + "standard": LORAType.STANDARD, + "full_diff": LORAType.FULL_DIFF, +} + + +def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, bias_diff=False): + comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True) + sd = model_diff.model_state_dict(filter_prefix=prefix_model) + + for k in sd: + if k.endswith(".weight"): + weight_diff = sd[k] + if lora_type == LORAType.STANDARD: + if weight_diff.ndim < 2: + if bias_diff: + output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu() + continue + try: + out = extract_lora(weight_diff, rank) + output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu() + output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu() + except Exception: + logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k)) + elif lora_type == LORAType.FULL_DIFF: + output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu() + + elif bias_diff and k.endswith(".bias"): + output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu() + return output_sd + + +class LoraSave(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LoraSave_V3", + display_name="Extract and Save Lora _V3", + category="_for_testing", + is_output_node=True, + inputs=[ + 
io.String.Input(id="filename_prefix", default="loras/ComfyUI_extracted_lora"), + io.Int.Input(id="rank", default=8, min=1, max=4096, step=1), + io.Combo.Input(id="lora_type", options=list(LORA_TYPES.keys())), + io.Boolean.Input(id="bias_diff", default=True), + io.Model.Input( + id="model_diff", optional=True, tooltip="The ModelSubtract output to be converted to a lora." + ), + io.Clip.Input( + id="text_encoder_diff", optional=True, tooltip="The CLIPSubtract output to be converted to a lora." + ), + ], + outputs=[], + is_experimental=True, + ) + + @classmethod + def execute(cls, filename_prefix, rank, lora_type, bias_diff, model_diff=None, text_encoder_diff=None): + if model_diff is None and text_encoder_diff is None: + return io.NodeOutput() + + lora_type = LORA_TYPES.get(lora_type) + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( + filename_prefix, folder_paths.get_output_directory() + ) + + output_sd = {} + if model_diff is not None: + output_sd = calc_lora_model( + model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, bias_diff=bias_diff + ) + if text_encoder_diff is not None: + output_sd = calc_lora_model( + text_encoder_diff.patcher, rank, "", "text_encoders.", output_sd, lora_type, bias_diff=bias_diff + ) + + output_checkpoint = f"{filename}_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + + comfy.utils.save_torch_file(output_sd, output_checkpoint, metadata=None) + return io.NodeOutput() + + +NODES_LIST = [ + LoraSave, +] diff --git a/comfy_extras/v3/nodes_lotus.py b/comfy_extras/v3/nodes_lotus.py new file mode 100644 index 000000000..9b19ca16b --- /dev/null +++ b/comfy_extras/v3/nodes_lotus.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +import torch + +import comfy.model_management as mm +from comfy_api.v3 import io + + +class LotusConditioning(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LotusConditioning_V3", + category="conditioning/lotus", + outputs=[ + io.Conditioning.Output(display_name="conditioning"), + ], + ) + + @classmethod + def execute(cls): + device = mm.get_torch_device() + #lotus uses a frozen encoder and null conditioning, i'm just inlining the results of that operation since it doesn't change + #and getting parity with the reference implementation would otherwise require inference and 800mb of tensors + prompt_embeds = torch.tensor([[[-0.3134765625, -0.447509765625, -0.00823974609375, -0.22802734375, 0.1785888671875, -0.2342529296875, -0.2188720703125, -0.0089111328125, -0.31396484375, 0.196533203125, -0.055877685546875, -0.3828125, -0.0965576171875, 0.0073394775390625, -0.284423828125, 0.07470703125, -0.086181640625, -0.211181640625, 0.0599365234375, 0.10693359375, 0.0007929801940917969, -0.78076171875, -0.382568359375, -0.1851806640625, -0.140625, -0.0936279296875, -0.1229248046875, -0.152099609375, -0.203857421875, -0.2349853515625, -0.2437744140625, -0.10858154296875, -0.08990478515625, 0.08892822265625, -0.2391357421875, -0.1611328125, -0.427978515625, -0.1336669921875, -0.27685546875, -0.1781005859375, -0.3857421875, 0.251953125, -0.055999755859375, -0.0712890625, -0.00130462646484375, 0.033477783203125, -0.26416015625, 0.07171630859375, -0.0090789794921875, -0.2025146484375, -0.2763671875, -0.09869384765625, -0.45751953125, -0.23095703125, 0.004528045654296875, -0.369140625, -0.366943359375, -0.205322265625, -0.1505126953125, -0.45166015625, -0.2059326171875, 
0.0168609619140625, -0.305419921875, -0.150634765625, 0.02685546875, -0.609375, -0.019012451171875, 0.050445556640625, -0.0084381103515625, -0.31005859375, -0.184326171875, -0.15185546875, 0.06732177734375, 0.150390625, -0.10919189453125, -0.08837890625, -0.50537109375, -0.389892578125, -0.0294342041015625, -0.10491943359375, -0.187255859375, -0.43212890625, -0.328125, -1.060546875, 0.011871337890625, 0.04730224609375, -0.09521484375, -0.07452392578125, -0.29296875, -0.109130859375, -0.250244140625, -0.3828125, -0.171875, -0.03399658203125, -0.15478515625, -0.1861572265625, -0.2398681640625, 0.1053466796875, -0.22314453125, -0.1932373046875, -0.18798828125, -0.430419921875, -0.05364990234375, -0.474609375, -0.261474609375, -0.1077880859375, -0.439208984375, 0.08966064453125, -0.185302734375, -0.338134765625, -0.297119140625, -0.298583984375, -0.175537109375, -0.373291015625, -0.1397705078125, -0.260498046875, -0.383544921875, -0.09979248046875, -0.319580078125, -0.06884765625, -0.4365234375, -0.183837890625, -0.393310546875, -0.002277374267578125, 0.11236572265625, -0.260498046875, -0.2242431640625, -0.19384765625, -0.51123046875, 0.03216552734375, -0.048004150390625, -0.279052734375, -0.2978515625, -0.255615234375, 0.115478515625, -4.08984375, -0.1668701171875, -0.278076171875, -0.5712890625, -0.1385498046875, -0.244384765625, -0.41455078125, -0.244140625, -0.0677490234375, -0.141357421875, -0.11590576171875, -0.1439208984375, -0.0185394287109375, -2.490234375, -0.1549072265625, -0.2305908203125, -0.3828125, -0.1173095703125, -0.08258056640625, -0.1719970703125, -0.325439453125, -0.292724609375, -0.08154296875, -0.412353515625, -0.3115234375, -0.00832366943359375, 0.00489044189453125, -0.2236328125, -0.151123046875, -0.457275390625, -0.135009765625, -0.163330078125, -0.0819091796875, 0.06689453125, 0.0209197998046875, -0.11907958984375, -0.10369873046875, -0.2998046875, -0.478759765625, -0.07940673828125, -0.01517486572265625, -0.3017578125, -0.343994140625, -0.258544921875, -0.44775390625, -0.392822265625, -0.0255584716796875, -0.2998046875, 0.10833740234375, -0.271728515625, -0.36181640625, -0.255859375, -0.2056884765625, -0.055450439453125, 0.060516357421875, -0.45751953125, -0.2322998046875, -0.1737060546875, -0.40576171875, -0.2286376953125, -0.053070068359375, -0.0283660888671875, -0.1898193359375, -4.291534423828125e-05, -0.6591796875, -0.1717529296875, -0.479736328125, -0.1400146484375, -0.40771484375, 0.154296875, 0.003101348876953125, 0.00661468505859375, -0.2073974609375, -0.493408203125, 2.171875, -0.45361328125, -0.283935546875, -0.302001953125, -0.25146484375, -0.207275390625, -0.1524658203125, -0.72998046875, -0.08203125, 0.053192138671875, -0.2685546875, 0.1834716796875, -0.270263671875, -0.091552734375, -0.08319091796875, -0.1297607421875, -0.453857421875, 0.0687255859375, 0.0268096923828125, -0.16552734375, -0.4208984375, -0.1552734375, -0.057373046875, -0.300537109375, -0.04541015625, -0.486083984375, -0.2205810546875, -0.39013671875, 0.007488250732421875, -0.005329132080078125, -0.09759521484375, -0.1448974609375, -0.21923828125, -0.429443359375, -0.40087890625, -0.19384765625, -0.064453125, -0.0306243896484375, -0.045806884765625, -0.056793212890625, 0.119384765625, -0.2073974609375, -0.356201171875, -0.168212890625, -0.291748046875, -0.289794921875, -0.205322265625, -0.419677734375, -0.478271484375, -0.2037353515625, -0.368408203125, -0.186279296875, -0.427734375, -0.1756591796875, 0.07501220703125, -0.2457275390625, -0.03692626953125, 0.003997802734375, -5.7578125, 
-0.01052093505859375, -0.2305908203125, -0.2252197265625, -0.197509765625, -0.1566162109375, -0.1668701171875, -0.383056640625, -0.05413818359375, 0.12188720703125, -0.369873046875, -0.0184478759765625, -0.150146484375, -0.51123046875, -0.45947265625, -0.1561279296875, 0.060455322265625, 0.043487548828125, -0.1370849609375, -0.069091796875, -0.285888671875, -0.44482421875, -0.2374267578125, -0.2191162109375, -0.434814453125, -0.0360107421875, 0.1298828125, 0.0217742919921875, -0.51220703125, -0.13525390625, -0.09381103515625, -0.276611328125, -0.171875, -0.17138671875, -0.4443359375, -0.2178955078125, -0.269775390625, -0.38623046875, -0.31591796875, -0.42333984375, -0.280029296875, -0.255615234375, -0.17041015625, 0.06268310546875, -0.1878662109375, -0.00677490234375, -0.23583984375, -0.08795166015625, -0.2232666015625, -0.1719970703125, -0.484130859375, -0.328857421875, 0.04669189453125, -0.0419921875, -0.11114501953125, 0.02313232421875, -0.0033130645751953125, -0.6005859375, 0.09051513671875, -0.1884765625, -0.262939453125, -0.375732421875, -0.525390625, -0.1170654296875, -0.3779296875, -0.242919921875, -0.419921875, 0.0665283203125, -0.343017578125, 0.06658935546875, -0.346435546875, -0.1363525390625, -0.2000732421875, -0.3837890625, 0.028167724609375, 0.043853759765625, -0.0171051025390625, -0.477294921875, -0.107421875, -0.129150390625, -0.319580078125, -0.32177734375, -0.4951171875, -0.010589599609375, -0.1778564453125, -0.40234375, -0.0810546875, 0.03314208984375, -0.13720703125, -0.31591796875, -0.048248291015625, -0.274658203125, -0.0689697265625, -0.027130126953125, -0.0953369140625, 0.146728515625, -0.38671875, -0.025390625, -0.42333984375, -0.41748046875, -0.379638671875, -0.1978759765625, -0.533203125, -0.33544921875, 0.0694580078125, -0.322998046875, -0.1876220703125, 0.0094451904296875, 0.1839599609375, -0.254150390625, -0.30078125, -0.09228515625, -0.0885009765625, 0.12371826171875, 0.1500244140625, -0.12152099609375, -0.29833984375, 0.03924560546875, -0.1470947265625, -0.1610107421875, -0.2049560546875, -0.01708984375, -0.2470703125, -0.1522216796875, -0.25830078125, 0.10870361328125, -0.302490234375, -0.2376708984375, -0.360107421875, -0.443359375, -0.0784912109375, -0.63623046875, -0.0980224609375, -0.332275390625, -0.1749267578125, -0.30859375, -0.1968994140625, -0.250244140625, -0.447021484375, -0.18408203125, -0.006908416748046875, -0.2044677734375, -0.2548828125, -0.369140625, -0.11328125, -0.1103515625, -0.27783203125, -0.325439453125, 0.01381683349609375, 0.036773681640625, -0.1458740234375, -0.34619140625, -0.232177734375, -0.0562744140625, -0.4482421875, -0.21875, -0.0855712890625, -0.276123046875, -0.1544189453125, -0.223388671875, -0.259521484375, 0.0865478515625, -0.0038013458251953125, -0.340087890625, -0.076171875, -0.25341796875, -0.0007548332214355469, -0.060455322265625, -0.352294921875, 0.035736083984375, -0.2181396484375, -0.2318115234375, -0.1707763671875, 0.018646240234375, 0.093505859375, -0.197021484375, 0.033477783203125, -0.035247802734375, 0.0440673828125, -0.2056884765625, -0.040924072265625, -0.05865478515625, 0.056884765625, -0.08807373046875, -0.10845947265625, 0.09564208984375, -0.10888671875, -0.332275390625, -0.1119384765625, -0.115478515625, 13.0234375, 0.0030040740966796875, -0.53662109375, -0.1856689453125, -0.068115234375, -0.143798828125, -0.177978515625, -0.32666015625, -0.353515625, -0.1563720703125, -0.3203125, 0.0085906982421875, -0.1043701171875, -0.365478515625, -0.303466796875, -0.34326171875, -0.410888671875, 
-0.03790283203125, -0.11419677734375, -0.2939453125, 0.074462890625, -0.21826171875, 0.0242767333984375, -0.226318359375, -0.353515625, -0.177734375, -0.169189453125, -0.2423095703125, -0.12115478515625, -0.07843017578125, -0.341064453125, -0.2117919921875, -0.505859375, -0.544921875, -0.3935546875, -0.10772705078125, -0.2054443359375, -0.136474609375, -0.1796875, -0.396240234375, -0.1971435546875, -0.68408203125, -0.032684326171875, -0.03863525390625, -0.0709228515625, -0.1005859375, -0.156005859375, -0.3837890625, -0.319580078125, 0.11102294921875, -0.394287109375, 0.0799560546875, -0.50341796875, -0.1572265625, 0.004131317138671875, -0.12286376953125, -0.2347412109375, -0.29150390625, -0.10321044921875, -0.286376953125, 0.018798828125, -0.152099609375, -0.321044921875, 0.0191650390625, -0.11376953125, -0.54736328125, 0.15869140625, -0.257568359375, -0.2490234375, -0.3115234375, -0.09765625, -0.350830078125, -0.36376953125, -0.0771484375, -0.2298583984375, -0.30615234375, -0.052154541015625, -0.12091064453125, -0.40283203125, -0.1649169921875, 0.0206451416015625, -0.312744140625, -0.10308837890625, -0.50341796875, -0.1754150390625, -0.2003173828125, -0.173583984375, -0.204833984375, -0.1876220703125, -0.12176513671875, -0.06201171875, -0.03485107421875, -0.20068359375, -0.21484375, -0.246337890625, -0.006587982177734375, -0.09674072265625, -0.4658203125, -0.3994140625, -0.2210693359375, -0.09588623046875, -0.126220703125, -0.09222412109375, -0.145751953125, -0.217529296875, -0.289306640625, -0.28271484375, -0.1787109375, -0.169189453125, -0.359375, -0.21826171875, -0.043792724609375, -0.205322265625, -0.2900390625, -0.055419921875, -0.1490478515625, -0.340576171875, -0.045928955078125, -0.30517578125, -0.51123046875, -0.1046142578125, -0.349853515625, -0.10882568359375, -0.16748046875, -0.267333984375, -0.122314453125, -0.0985107421875, -0.3076171875, -0.1766357421875, -0.251708984375, 0.1964111328125, -0.2220458984375, -0.2349853515625, -0.035980224609375, -0.1749267578125, -0.237060546875, -0.480224609375, -0.240234375, -0.09539794921875, -0.2481689453125, -0.389404296875, -0.1748046875, -0.370849609375, -0.010650634765625, -0.147705078125, -0.0035457611083984375, -0.32568359375, -0.29931640625, -0.1395263671875, -0.28173828125, -0.09820556640625, -0.0176239013671875, -0.05926513671875, -0.0755615234375, -0.1746826171875, -0.283203125, -0.1617431640625, -0.4404296875, 0.046234130859375, -0.183837890625, -0.052032470703125, -0.24658203125, -0.11224365234375, -0.100830078125, -0.162841796875, -0.29736328125, -0.396484375, 0.11798095703125, -0.006496429443359375, -0.32568359375, -0.347900390625, -0.04595947265625, -0.09637451171875, -0.344970703125, -0.01166534423828125, -0.346435546875, -0.2861328125, -0.1845703125, -0.276611328125, -0.01312255859375, -0.395263671875, -0.50927734375, -0.1114501953125, -0.1861572265625, -0.2158203125, -0.1812744140625, 0.055419921875, -0.294189453125, 0.06500244140625, -0.1444091796875, -0.06365966796875, -0.18408203125, -0.0091705322265625, -0.1640625, -0.1856689453125, 0.090087890625, 0.024566650390625, -0.0195159912109375, -0.5546875, -0.301025390625, -0.438232421875, -0.072021484375, 0.030517578125, -0.1490478515625, 0.04888916015625, -0.23681640625, -0.1553955078125, -0.018096923828125, -0.229736328125, -0.2919921875, -0.355712890625, -0.285400390625, -0.1756591796875, -0.08355712890625, -0.416259765625, 0.022674560546875, -0.417236328125, 0.410400390625, -0.249755859375, 0.015625, -0.033599853515625, -0.040313720703125, -0.51708984375, 
-0.0518798828125, -0.08843994140625, -0.2022705078125, -0.3740234375, -0.285888671875, -0.176025390625, -0.292724609375, -0.369140625, -0.08367919921875, -0.356689453125, -0.38623046875, 0.06549072265625, 0.1669921875, -0.2099609375, -0.007434844970703125, 0.12890625, -0.0040740966796875, -0.2174072265625, -0.025115966796875, -0.2364501953125, -0.1695556640625, -0.0469970703125, -0.03924560546875, -0.36181640625, -0.047515869140625, -0.3154296875, -0.275634765625, -0.25634765625, -0.061920166015625, -0.12164306640625, -0.47314453125, -0.10784912109375, -0.74755859375, -0.13232421875, -0.32421875, -0.04998779296875, -0.286376953125, 0.10345458984375, -0.1710205078125, -0.388916015625, 0.12744140625, -0.3359375, -0.302490234375, -0.238525390625, -0.1455078125, -0.15869140625, -0.2427978515625, -0.0355224609375, -0.11944580078125, -0.31298828125, 0.11456298828125, -0.287841796875, -0.5439453125, -0.3076171875, -0.08642578125, -0.2408447265625, -0.283447265625, -0.428466796875, -0.085693359375, -0.1683349609375, 0.255126953125, 0.07635498046875, -0.38623046875, -0.2025146484375, -0.1331787109375, -0.10821533203125, -0.49951171875, 0.09130859375, -0.19677734375, -0.01904296875, -0.151123046875, -0.344482421875, -0.316650390625, -0.03900146484375, 0.1397705078125, 0.1334228515625, -0.037200927734375, -0.01861572265625, -0.1351318359375, -0.07037353515625, -0.380615234375, -0.34033203125, -0.06903076171875, 0.219970703125, 0.0132598876953125, -0.15869140625, -0.6376953125, 0.158935546875, -0.5283203125, -0.2320556640625, -0.185791015625, -0.2132568359375, -0.436767578125, -0.430908203125, -0.1763916015625, -0.0007672309875488281, -0.424072265625, -0.06719970703125, -0.347900390625, -0.14453125, -0.3056640625, -0.36474609375, -0.35986328125, -0.46240234375, -0.446044921875, -0.1905517578125, -0.1114501953125, -0.42919921875, -0.0643310546875, -0.3662109375, -0.4296875, -0.10968017578125, -0.2998046875, -0.1756591796875, -0.4052734375, -0.0841064453125, -0.252197265625, -0.047393798828125, 0.00434112548828125, -0.10040283203125, -0.271484375, -0.185302734375, -0.1910400390625, 0.10260009765625, 0.01393890380859375, -0.03350830078125, -0.33935546875, -0.329345703125, 0.0574951171875, -0.18896484375, -0.17724609375, -0.42919921875, -0.26708984375, -0.4189453125, -0.149169921875, -0.265625, -0.198974609375, -0.1722412109375, 0.1563720703125, -0.20947265625, -0.267822265625, -0.06353759765625, -0.365478515625, -0.340087890625, -0.3095703125, -0.320068359375, -0.0880126953125, -0.353759765625, -0.0005812644958496094, -0.1617431640625, -0.1866455078125, -0.201416015625, -0.181396484375, -0.2349853515625, -0.384765625, -0.5244140625, 0.01227569580078125, -0.21337890625, -0.30810546875, -0.17578125, -0.3037109375, -0.52978515625, -0.1561279296875, -0.296142578125, 0.057342529296875, -0.369384765625, -0.107666015625, -0.338623046875, -0.2060546875, -0.0213775634765625, -0.394775390625, -0.219482421875, -0.125732421875, -0.03997802734375, -0.42431640625, -0.134521484375, -0.2418212890625, -0.10504150390625, 0.1552734375, 0.1126708984375, -0.1427001953125, -0.133544921875, -0.111083984375, -0.375732421875, -0.2783203125, -0.036834716796875, -0.11053466796875, 0.2471923828125, -0.2529296875, -0.56494140625, -0.374755859375, -0.326416015625, 0.2137451171875, -0.09454345703125, -0.337158203125, -0.3359375, -0.34375, -0.0999755859375, -0.388671875, 0.0103302001953125, 0.14990234375, -0.2041015625, -0.39501953125, -0.39013671875, -0.1258544921875, 0.1453857421875, -0.250732421875, -0.06732177734375, 
-0.10638427734375, -0.032379150390625, -0.35888671875, -0.098876953125, -0.172607421875, 0.05126953125, -0.1956787109375, -0.183837890625, -0.37060546875, 0.1556396484375, -0.34375, -0.28662109375, -0.06982421875, -0.302490234375, -0.281005859375, -0.1640625, -0.5302734375, -0.1368408203125, -0.1268310546875, -0.35302734375, -0.1473388671875, -0.45556640625, -0.35986328125, -0.273681640625, -0.2249755859375, -0.1893310546875, 0.09356689453125, -0.248291015625, -0.197998046875, -0.3525390625, -0.30126953125, -0.228271484375, -0.2421875, -0.0906982421875, 0.227783203125, -0.296875, -0.009796142578125, -0.2939453125, -0.1021728515625, -0.215576171875, -0.267822265625, -0.052642822265625, 0.203369140625, -0.1417236328125, 0.18505859375, 0.12347412109375, -0.0972900390625, -0.54052734375, -0.430419921875, -0.0906982421875, -0.5419921875, -0.22900390625, -0.0625, -0.12152099609375, -0.495849609375, -0.206787109375, -0.025848388671875, 0.039031982421875, -0.453857421875, -0.318359375, -0.426025390625, -0.3701171875, -0.2169189453125, 0.0845947265625, -0.045654296875, 0.11090087890625, 0.0012454986572265625, 0.2066650390625, -0.046356201171875, -0.2337646484375, -0.295654296875, 0.057891845703125, -0.1639404296875, -0.0535888671875, -0.2607421875, -0.1488037109375, -0.16015625, -0.54345703125, -0.2305908203125, -0.55029296875, -0.178955078125, -0.222412109375, -0.0711669921875, -0.12298583984375, -0.119140625, -0.253662109375, -0.33984375, -0.11322021484375, -0.10723876953125, -0.205078125, -0.360595703125, 0.085205078125, -0.252197265625, -0.365966796875, -0.26953125, 0.2000732421875, -0.50634765625, 0.05706787109375, -0.3115234375, 0.0242919921875, -0.1689453125, -0.2401123046875, -0.3759765625, -0.2125244140625, 0.076416015625, -0.489013671875, -0.11749267578125, -0.55908203125, -0.313232421875, -0.572265625, -0.1387939453125, -0.037078857421875, -0.385498046875, 0.0323486328125, -0.39404296875, -0.05072021484375, -0.10430908203125, -0.10919189453125, -0.28759765625, -0.37451171875, -0.016937255859375, -0.2200927734375, -0.296875, -0.0286712646484375, -0.213134765625, 0.052001953125, -0.052337646484375, -0.253662109375, 0.07269287109375, -0.2498779296875, -0.150146484375, -0.09930419921875, -0.343505859375, 0.254150390625, -0.032440185546875, -0.296142578125], [1.4111328125, 0.00757598876953125, -0.428955078125, 0.089599609375, 0.0227813720703125, -0.0350341796875, -1.0986328125, 0.194091796875, 2.115234375, -0.75439453125, 0.269287109375, -0.73486328125, -1.1025390625, -0.050262451171875, -0.5830078125, 0.0268707275390625, -0.603515625, -0.6025390625, -1.1689453125, 0.25048828125, -0.4189453125, -0.5517578125, -0.30322265625, 0.7724609375, 0.931640625, -0.1422119140625, 2.27734375, -0.56591796875, 1.013671875, -0.9638671875, -0.66796875, -0.8125, 1.3740234375, -1.060546875, -1.029296875, -1.6796875, 0.62890625, 0.49365234375, 0.671875, 0.99755859375, -1.0185546875, -0.047027587890625, -0.374267578125, 0.2354736328125, 1.4970703125, -1.5673828125, 0.448974609375, 0.2078857421875, -1.060546875, -0.171875, -0.6201171875, -0.1607666015625, 0.7548828125, -0.58935546875, -0.2052001953125, 0.060791015625, 0.200439453125, 3.154296875, -3.87890625, 2.03515625, 1.126953125, 0.1640625, -1.8447265625, 0.002620697021484375, 0.7998046875, -0.337158203125, 0.47216796875, -0.5849609375, 0.9970703125, 0.3935546875, 1.22265625, -1.5048828125, -0.65673828125, 1.1474609375, -1.73046875, -1.8701171875, 1.529296875, -0.6787109375, -1.4453125, 1.556640625, -0.327392578125, 2.986328125, -0.146240234375, -2.83984375, 
0.303466796875, -0.71728515625, -0.09698486328125, -0.2423095703125, 0.6767578125, -2.197265625, -0.86279296875, -0.53857421875, -1.2236328125, 1.669921875, -1.1689453125, -0.291259765625, -0.54736328125, -0.036346435546875, 1.041015625, -1.7265625, -0.6064453125, -0.1634521484375, 0.2381591796875, 0.65087890625, -1.169921875, 1.9208984375, 0.5634765625, 0.37841796875, 0.798828125, -1.021484375, -0.4091796875, 2.275390625, -0.302734375, -1.7783203125, 1.0458984375, 1.478515625, 0.708984375, -1.541015625, -0.0006041526794433594, 1.1884765625, 2.041015625, 0.560546875, -0.1131591796875, 1.0341796875, 0.06121826171875, 2.6796875, -0.53369140625, -1.2490234375, -0.7333984375, -1.017578125, -1.0078125, 1.3212890625, -0.47607421875, -1.4189453125, 0.54052734375, -0.796875, -0.73095703125, -1.412109375, -0.94873046875, -2.2734375, -1.1220703125, -1.3837890625, -0.5087890625, -1.0380859375, -0.93603515625, -0.58349609375, -1.0703125, -1.10546875, -2.60546875, 0.062225341796875, 0.38232421875, -0.411376953125, -0.369140625, -0.9833984375, -0.7294921875, -0.181396484375, -0.47216796875, -0.56884765625, -0.11041259765625, -2.673828125, 0.27783203125, -0.857421875, 0.9296875, 1.9580078125, 0.1385498046875, -1.91796875, -1.529296875, 0.53857421875, 0.509765625, -0.90380859375, -0.0947265625, -2.083984375, 0.9228515625, -0.28564453125, -0.80859375, -0.093505859375, -0.6015625, -1.255859375, 0.6533203125, 0.327880859375, -0.07598876953125, -0.22705078125, -0.30078125, -0.5185546875, -1.6044921875, 1.5927734375, 1.416015625, -0.91796875, -0.276611328125, -0.75830078125, -1.1689453125, -1.7421875, 1.0546875, -0.26513671875, -0.03314208984375, 0.278076171875, -1.337890625, 0.055023193359375, 0.10546875, -1.064453125, 1.048828125, -1.4052734375, -1.1240234375, -0.51416015625, -1.05859375, -1.7265625, -1.1328125, 0.43310546875, -2.576171875, -2.140625, -0.79345703125, 0.50146484375, 1.96484375, 0.98583984375, 0.337646484375, -0.77978515625, 0.85498046875, -0.65185546875, -0.484375, 2.708984375, 0.55810546875, -0.147216796875, -0.5537109375, -0.75439453125, -1.736328125, 1.1259765625, -1.095703125, -0.2587890625, 2.978515625, 0.335205078125, 0.357666015625, -0.09356689453125, 0.295654296875, -0.23779296875, 1.5751953125, 0.10400390625, 1.7001953125, -0.72900390625, -1.466796875, -0.2012939453125, 0.634765625, -0.1556396484375, -2.01171875, 0.32666015625, 0.047454833984375, -0.1671142578125, -0.78369140625, -0.994140625, 0.7802734375, -0.1429443359375, -0.115234375, 0.53271484375, -0.96142578125, -0.064208984375, 1.396484375, 1.654296875, -1.6015625, -0.77392578125, 0.276123046875, -0.42236328125, 0.8642578125, 0.533203125, 0.397216796875, -1.21484375, 0.392578125, -0.501953125, -0.231689453125, 1.474609375, 1.6669921875, 1.8662109375, -1.2998046875, 0.223876953125, -0.51318359375, -0.437744140625, -1.16796875, -0.7724609375, 1.6826171875, 0.62255859375, 2.189453125, -0.599609375, -0.65576171875, -1.1005859375, -0.45263671875, -0.292236328125, 2.58203125, -1.3779296875, 0.23486328125, -1.708984375, -1.4111328125, -0.5078125, -0.8525390625, -0.90771484375, 0.861328125, -2.22265625, -1.380859375, 0.7275390625, 0.85595703125, -0.77978515625, 2.044921875, -0.430908203125, 0.78857421875, -1.21484375, -0.09130859375, 0.5146484375, -1.92578125, -0.1396484375, 0.289306640625, 0.60498046875, 0.93896484375, -0.09295654296875, -0.45751953125, -0.986328125, -0.66259765625, 1.48046875, 0.274169921875, -0.267333984375, -1.3017578125, -1.3623046875, -1.982421875, -0.86083984375, -0.41259765625, -0.2939453125, -1.91015625, 
1.6826171875, 0.437255859375, 1.0029296875, 0.376220703125, -0.010467529296875, -0.82861328125, -0.513671875, -3.134765625, 1.0205078125, -1.26171875, -1.009765625, 1.0869140625, -0.95703125, 0.0103759765625, 1.642578125, 0.78564453125, 1.029296875, 0.496826171875, 1.2880859375, 0.5234375, 0.05322265625, -0.206787109375, -0.79443359375, -1.1669921875, 0.049530029296875, -0.27978515625, 0.0237884521484375, -0.74169921875, -1.068359375, 0.86083984375, 1.1787109375, 0.91064453125, -0.453857421875, -1.822265625, -0.9228515625, -0.50048828125, 0.359130859375, 0.802734375, -1.3564453125, -0.322509765625, -1.1123046875, -1.0390625, -0.52685546875, -1.291015625, -0.343017578125, -1.2109375, -0.19091796875, 2.146484375, -0.04315185546875, -0.3701171875, -2.044921875, -0.429931640625, -0.56103515625, -0.166015625, -0.4658203125, -2.29296875, -1.078125, -1.0927734375, -0.1033935546875, -0.56103515625, -0.05743408203125, -1.986328125, -0.513671875, 0.70361328125, -2.484375, -1.3037109375, -1.6650390625, 0.4814453125, -0.84912109375, -2.697265625, -0.197998046875, 0.0869140625, -0.172607421875, -1.326171875, -1.197265625, 1.23828125, -0.38720703125, -0.075927734375, 0.02569580078125, -1.2119140625, 0.09027099609375, -2.12890625, -1.640625, -0.1524658203125, 0.2373046875, 1.37109375, 2.248046875, 1.4619140625, 0.3134765625, 0.50244140625, -0.1383056640625, -1.2705078125, 0.7353515625, 0.65771484375, -0.431396484375, -1.341796875, 0.10089111328125, 0.208984375, -0.0099945068359375, 0.83203125, 1.314453125, -0.422607421875, -1.58984375, -0.6044921875, 0.23681640625, -1.60546875, -0.61083984375, -1.5615234375, 1.62890625, -0.6728515625, -0.68212890625, -0.5224609375, -0.9150390625, -0.468994140625, 0.268310546875, 0.287353515625, -0.025543212890625, 0.443603515625, 1.62109375, -1.08984375, -0.5556640625, 1.03515625, -0.31298828125, -0.041778564453125, 0.260986328125, 0.34716796875, -2.326171875, 0.228271484375, -0.85107421875, -2.255859375, 0.3486328125, -0.25830078125, -0.3671875, -0.796875, -1.115234375, 1.8369140625, -0.19775390625, -1.236328125, -0.0447998046875, 0.69921875, 1.37890625, 1.11328125, 0.0928955078125, 0.6318359375, -0.62353515625, 0.55859375, -0.286865234375, 1.5361328125, -0.391357421875, -0.052215576171875, -1.12890625, 0.55517578125, -0.28515625, -0.3603515625, 0.68896484375, 0.67626953125, 0.003070831298828125, 1.2236328125, 0.1597900390625, -1.3076171875, 0.99951171875, -2.5078125, -1.2119140625, 0.1749267578125, -1.1865234375, -1.234375, -0.1180419921875, -1.751953125, 0.033050537109375, 0.234130859375, -3.107421875, -1.0380859375, 0.61181640625, -0.87548828125, 0.3154296875, -1.103515625, 0.261474609375, -1.130859375, -0.7470703125, -0.43408203125, 1.3828125, -0.41259765625, -1.7587890625, 0.765625, 0.004852294921875, 0.135498046875, -0.76953125, -0.1314697265625, 0.400390625, 1.43359375, 0.07135009765625, 0.0645751953125, -0.5869140625, -0.5810546875, -0.2900390625, -1.3037109375, 0.1287841796875, -0.27490234375, 0.59228515625, 2.333984375, -0.54541015625, -0.556640625, 0.447265625, -0.806640625, 0.09149169921875, -0.70654296875, -0.357177734375, -1.099609375, -0.5576171875, -0.44189453125, 0.400390625, -0.666015625, -1.4619140625, 0.728515625, -1.5986328125, 0.153076171875, -0.126708984375, -2.83984375, -1.84375, -0.2469482421875, 0.677734375, 0.43701171875, 3.298828125, 1.1591796875, -0.7158203125, -0.8251953125, 0.451171875, -2.376953125, -0.58642578125, -0.86767578125, 0.0789794921875, 0.1351318359375, -0.325439453125, 0.484375, 1.166015625, -0.1610107421875, -0.15234375, 
-0.54638671875, -0.806640625, 0.285400390625, 0.1661376953125, -0.50146484375, -1.0478515625, 1.5751953125, 0.0313720703125, 0.2396240234375, -0.6572265625, -0.1258544921875, -1.060546875, 1.3076171875, -0.301513671875, -1.2412109375, 0.6376953125, -1.5693359375, 0.354248046875, 0.2427978515625, -0.392333984375, 0.61962890625, -0.58837890625, -1.71484375, -0.2098388671875, -0.828125, 0.330810546875, 0.16357421875, -0.2259521484375, 0.0972900390625, -0.451416015625, 1.79296875, -1.673828125, -1.58203125, -2.099609375, -0.487548828125, -0.87060546875, 0.62646484375, -1.470703125, -0.1558837890625, 0.4609375, 1.3369140625, 0.2322998046875, 0.1632080078125, 0.65966796875, 1.0810546875, 0.1041259765625, 0.63232421875, -0.32421875, -1.04296875, -1.046875, -1.3720703125, -0.8486328125, 0.1290283203125, 0.137939453125, 0.1549072265625, -1.0908203125, 0.0167694091796875, -0.31689453125, 1.390625, 0.07269287109375, 1.0390625, 1.1162109375, -0.455810546875, -0.06689453125, -0.053741455078125, 0.5048828125, -0.8408203125, -1.19921875, 0.87841796875, 0.7421875, 0.2030029296875, 0.109619140625, -0.59912109375, -1.337890625, -0.74169921875, -0.64453125, -1.326171875, 0.21044921875, -1.3583984375, -1.685546875, -0.472900390625, -0.270263671875, 0.99365234375, -0.96240234375, 1.1279296875, -0.45947265625, -0.45654296875, -0.99169921875, -3.515625, -1.9853515625, 0.73681640625, 0.92333984375, -0.56201171875, -1.4453125, -2.078125, 0.94189453125, -1.333984375, 0.0982666015625, 0.60693359375, 0.367431640625, 3.015625, -1.1357421875, -1.5634765625, 0.90234375, -0.1783447265625, 0.1802978515625, -0.317138671875, -0.513671875, 1.2353515625, -0.033203125, 1.4482421875, 1.0087890625, 0.9248046875, 0.10418701171875, 0.7626953125, -1.3798828125, 0.276123046875, 0.55224609375, 1.1005859375, -0.62158203125, -0.806640625, 0.65087890625, 0.270263671875, -0.339111328125, -0.9384765625, -0.09381103515625, -0.7216796875, 1.37890625, -0.398193359375, -0.3095703125, -1.4912109375, 0.96630859375, 0.43798828125, 0.62255859375, 0.0213470458984375, 0.235595703125, -1.2958984375, 0.0157318115234375, -0.810546875, 1.9736328125, -0.2462158203125, 0.720703125, 0.822265625, -0.755859375, -0.658203125, 0.344482421875, -2.892578125, -0.282470703125, 1.2529296875, -0.294189453125, 0.6748046875, -0.80859375, 0.9287109375, 1.27734375, -1.71875, -0.166015625, 0.47412109375, -0.41259765625, -1.3681640625, -0.978515625, -0.77978515625, -1.044921875, -0.90380859375, -0.08184814453125, -0.86181640625, -0.10772705078125, -0.299560546875, -0.4306640625, -0.47119140625, 0.95703125, 1.107421875, 0.91796875, 0.76025390625, 0.7392578125, -0.09161376953125, -0.7392578125, 0.9716796875, -0.395751953125, -0.75390625, -0.164306640625, -0.087646484375, 0.028564453125, -0.91943359375, -0.66796875, 2.486328125, 0.427734375, 0.626953125, 0.474853515625, 0.0926513671875, 0.830078125, -0.6923828125, 0.7841796875, -0.89208984375, -2.482421875, 0.034912109375, -1.3447265625, -0.475341796875, -0.286376953125, -0.732421875, 0.190673828125, -0.491455078125, -3.091796875, -1.2783203125, -0.66015625, -0.1507568359375, 0.042236328125, -1.025390625, 0.12744140625, -1.984375, -0.393798828125, -1.25, -1.140625, 1.77734375, 0.2457275390625, -0.8017578125, 0.7763671875, -0.387939453125, -0.3662109375, 1.1572265625, 0.123291015625, -0.07135009765625, 1.412109375, -0.685546875, -3.078125, 0.031524658203125, -0.70458984375, 0.78759765625, 0.433837890625, -1.861328125, -1.33203125, 2.119140625, -1.3544921875, -0.6591796875, -1.4970703125, 0.40625, -2.078125, -1.30859375, 
0.050262451171875, -0.60107421875, 1.0078125, 0.05657958984375, -0.96826171875, 0.0264892578125, 0.159912109375, 0.84033203125, -1.1494140625, -0.0433349609375, -0.2034912109375, 1.09765625, -1.142578125, -0.283203125, -0.427978515625, 1.0927734375, -0.67529296875, -0.61572265625, 2.517578125, 0.84130859375, 1.8662109375, 0.1748046875, -0.407958984375, -0.029449462890625, -0.27587890625, -0.958984375, -0.10028076171875, 1.248046875, -0.0792236328125, -0.45556640625, 0.7685546875, 1.5556640625, -1.8759765625, -0.131591796875, -1.3583984375, 0.7890625, 0.80810546875, -1.0322265625, -0.53076171875, -0.1484375, -1.7841796875, -1.2470703125, 0.17138671875, -0.04864501953125, -0.80322265625, -0.0933837890625, 0.984375, 0.7001953125, 0.5380859375, 0.2022705078125, -1.1865234375, 0.5439453125, 1.1318359375, 0.79931640625, 0.32666015625, -1.26171875, 0.457763671875, 1.1591796875, -0.34423828125, 0.65771484375, 0.216552734375, 1.19140625, -0.2744140625, -0.020416259765625, -0.86376953125, 0.93017578125, 1.0556640625, 0.69873046875, -0.15087890625, -0.33056640625, 0.8505859375, 0.06890869140625, 0.359375, -0.262939453125, 0.12493896484375, 0.017059326171875, -0.98974609375, 0.5107421875, 0.2408447265625, 0.615234375, -0.62890625, 0.86962890625, -0.07427978515625, 0.85595703125, 0.300537109375, -1.072265625, -1.6064453125, -0.353515625, -0.484130859375, -0.6044921875, -0.455810546875, 0.95849609375, 1.3671875, 0.544921875, 0.560546875, 0.34521484375, -0.6513671875, -0.410400390625, -0.2021484375, -0.1656494140625, 0.073486328125, 0.84716796875, -1.7998046875, -1.0126953125, -0.1324462890625, 0.95849609375, -0.669921875, -0.79052734375, -2.193359375, -0.42529296875, -1.7275390625, -1.04296875, 0.716796875, -0.4423828125, -1.193359375, 0.61572265625, -1.5224609375, 0.62890625, -0.705078125, 0.677734375, -0.213134765625, -1.6748046875, -1.087890625, -0.65185546875, -1.1337890625, 2.314453125, -0.352783203125, -0.27001953125, -2.01953125, -1.2685546875, 0.308837890625, -0.280517578125, -1.3798828125, -1.595703125, 0.642578125, 1.693359375, -0.82470703125, -1.255859375, 0.57373046875, 1.5859375, 1.068359375, -0.876953125, 0.370849609375, 1.220703125, 0.59765625, 0.007602691650390625, 0.09326171875, -0.9521484375, -0.024932861328125, -0.94775390625, -0.299560546875, -0.002536773681640625, 1.41796875, -0.06903076171875, -1.5927734375, 0.353515625, 3.63671875, -0.765625, -1.1142578125, 0.4287109375, -0.86865234375, -0.9267578125, -0.21826171875, -1.10546875, 0.29296875, -0.225830078125, 0.5400390625, -0.45556640625, -0.68701171875, -0.79150390625, -1.0810546875, 0.25439453125, -1.2998046875, -0.494140625, -0.1510009765625, 1.5615234375, -0.4248046875, -0.486572265625, 0.45458984375, 0.047637939453125, -0.11639404296875, 0.057403564453125, 0.130126953125, -0.10125732421875, -0.56201171875, 1.4765625, -1.7451171875, 1.34765625, -0.45703125, 0.873046875, -0.056121826171875, -0.8876953125, -0.986328125, 1.5654296875, 0.49853515625, 0.55859375, -0.2198486328125, 0.62548828125, 0.2734375, -0.63671875, -0.41259765625, -1.2705078125, 0.0665283203125, 1.3369140625, 0.90283203125, -0.77685546875, -1.5, -1.8525390625, -1.314453125, -0.86767578125, -0.331787109375, 0.1590576171875, 0.94775390625, -0.1771240234375, 1.638671875, -2.17578125, 0.58740234375, 0.424560546875, -0.3466796875, 0.642578125, 0.473388671875, 0.96435546875, 1.38671875, -0.91357421875, 1.0361328125, -0.67333984375, 1.5009765625]]]).to(device) + + cond = [[prompt_embeds, {}]] + + return io.NodeOutput(cond) + + +NODES_LIST = [ + LotusConditioning, +] 
diff --git a/comfy_extras/v3/nodes_lumina2.py b/comfy_extras/v3/nodes_lumina2.py new file mode 100644 index 000000000..66ea981fc --- /dev/null +++ b/comfy_extras/v3/nodes_lumina2.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +import torch + +from comfy_api.v3 import io + + +class CLIPTextEncodeLumina2(io.ComfyNode): + SYSTEM_PROMPT = { + "superior": "You are an assistant designed to generate superior images with the superior " + "degree of image-text alignment based on textual prompts or user prompts.", + "alignment": "You are an assistant designed to generate high-quality images with the " + "highest degree of image-text alignment based on textual prompts." + } + SYSTEM_PROMPT_TIP = "Lumina2 provide two types of system prompts:" \ + "Superior: You are an assistant designed to generate superior images with the superior "\ + "degree of image-text alignment based on textual prompts or user prompts. "\ + "Alignment: You are an assistant designed to generate high-quality images with the highest "\ + "degree of image-text alignment based on textual prompts." + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeLumina2_V3", + display_name="CLIP Text Encode for Lumina2 _V3", + category="conditioning", + description="Encodes a system prompt and a user prompt using a CLIP model into an embedding " + "that can be used to guide the diffusion model towards generating specific images.", + inputs=[ + io.Combo.Input(id="system_prompt", options=list(cls.SYSTEM_PROMPT.keys()), tooltip=cls.SYSTEM_PROMPT_TIP), + io.String.Input(id="user_prompt", multiline=True, dynamic_prompts=True, tooltip="The text to be encoded."), + io.Clip.Input(id="clip", tooltip="The CLIP model used for encoding the text."), + ], + outputs=[ + io.Conditioning.Output(tooltip="A conditioning containing the embedded text used to guide the diffusion model."), + ], + ) + + @classmethod + def execute(cls, system_prompt, user_prompt, clip): + if clip is None: + raise RuntimeError( + "ERROR: clip input is invalid: None\n\n" + "If the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model." 
+ ) + system_prompt = cls.SYSTEM_PROMPT[system_prompt] + prompt = f'{system_prompt} {user_prompt}' + tokens = clip.tokenize(prompt) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) + + +class RenormCFG(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="RenormCFG_V3", + category="advanced/model", + inputs=[ + io.Model.Input(id="model"), + io.Float.Input(id="cfg_trunc", default=100, min=0.0, max=100.0, step=0.01), + io.Float.Input(id="renorm_cfg", default=1.0, min=0.0, max=100.0, step=0.01), + ], + outputs=[ + io.Model.Output(), + ], + ) + + @classmethod + def execute(cls, model, cfg_trunc, renorm_cfg): + def renorm_cfg_func(args): + cond_denoised = args["cond_denoised"] + uncond_denoised = args["uncond_denoised"] + cond_scale = args["cond_scale"] + timestep = args["timestep"] + x_orig = args["input"] + in_channels = model.model.diffusion_model.in_channels + + if timestep[0] < cfg_trunc: + cond_eps, uncond_eps = cond_denoised[:, :in_channels], uncond_denoised[:, :in_channels] + cond_rest, _ = cond_denoised[:, in_channels:], uncond_denoised[:, in_channels:] + half_eps = uncond_eps + cond_scale * (cond_eps - uncond_eps) + half_rest = cond_rest + + if float(renorm_cfg) > 0.0: + ori_pos_norm = torch.linalg.vector_norm( + cond_eps, + dim=tuple(range(1, len(cond_eps.shape))), + keepdim=True + ) + max_new_norm = ori_pos_norm * float(renorm_cfg) + new_pos_norm = torch.linalg.vector_norm( + half_eps, dim=tuple(range(1, len(half_eps.shape))), keepdim=True + ) + if new_pos_norm >= max_new_norm: + half_eps = half_eps * (max_new_norm / new_pos_norm) + else: + cond_eps, uncond_eps = cond_denoised[:, :in_channels], uncond_denoised[:, :in_channels] + cond_rest, _ = cond_denoised[:, in_channels:], uncond_denoised[:, in_channels:] + half_eps = cond_eps + half_rest = cond_rest + + cfg_result = torch.cat([half_eps, half_rest], dim=1) + + # cfg_result = uncond_denoised + (cond_denoised - uncond_denoised) * cond_scale + + return x_orig - cfg_result + + m = model.clone() + m.set_model_sampler_cfg_function(renorm_cfg_func) + return io.NodeOutput(m) + + +NODES_LIST = [ + CLIPTextEncodeLumina2, + RenormCFG, +] diff --git a/nodes.py b/nodes.py index d1b3ff60c..17367c94e 100644 --- a/nodes.py +++ b/nodes.py @@ -2321,8 +2321,13 @@ def init_builtin_extra_nodes(): "v3/nodes_gits.py", "v3/nodes_hidream.py", "v3/nodes_images.py", + "v3/nodes_ip2p.py", "v3/nodes_latent.py", + "v3/nodes_load_3d.py", + "v3/nodes_lora_extract.py", + "v3/nodes_lotus.py", "v3/nodes_lt.py", + "v3/nodes_lumina2.py", "v3/nodes_mask.py", "v3/nodes_mochi.py", "v3/nodes_model_advanced.py", From b4d9a27fdb054b802f879a99cdbd212d4f963b31 Mon Sep 17 00:00:00 2001 From: bigcat88 Date: Thu, 24 Jul 2025 11:16:03 +0300 Subject: [PATCH 2/7] converted nodes files starting with "h" letter --- comfy_extras/v3/nodes_hunyuan.py | 167 ++++++++++++++++++++++++++ comfy_extras/v3/nodes_hypernetwork.py | 136 +++++++++++++++++++++ comfy_extras/v3/nodes_hypertile.py | 95 +++++++++++++++ nodes.py | 3 + 4 files changed, 401 insertions(+) create mode 100644 comfy_extras/v3/nodes_hunyuan.py create mode 100644 comfy_extras/v3/nodes_hypernetwork.py create mode 100644 comfy_extras/v3/nodes_hypertile.py diff --git a/comfy_extras/v3/nodes_hunyuan.py b/comfy_extras/v3/nodes_hunyuan.py new file mode 100644 index 000000000..d606081c2 --- /dev/null +++ b/comfy_extras/v3/nodes_hunyuan.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import torch + +import comfy.model_management +import node_helpers +import nodes +from 
comfy_api.v3 import io + +PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = ( + "<|start_header_id|>system<|end_header_id|>\n\n<image>\nDescribe the video by detailing the following aspects according to the reference image: " + "1. The main content and theme of the video." + "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects." + "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects." + "4. background environment, light, style and atmosphere." + "5. camera angles, movements, and transitions used in the video:<|eot_id|>\n\n" + "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>" + "<|start_header_id|>assistant<|end_header_id|>\n\n" +) + +class CLIPTextEncodeHunyuanDiT(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="CLIPTextEncodeHunyuanDiT_V3", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.String.Input("bert", multiline=True, dynamic_prompts=True), + io.String.Input("mt5xl", multiline=True, dynamic_prompts=True), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) + + @classmethod + def execute(cls, clip, bert, mt5xl): + tokens = clip.tokenize(bert) + tokens["mt5xl"] = clip.tokenize(mt5xl)["mt5xl"] + + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) + + +class EmptyHunyuanLatentVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="EmptyHunyuanLatentVideo_V3", + category="latent/video", + inputs=[ + io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=25, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + ], + outputs=[ + io.Latent.Output(), + ], + ) + + @classmethod + def execute(cls, width, height, length, batch_size): + latent = torch.zeros( + [batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], + device=comfy.model_management.intermediate_device(), + ) + return io.NodeOutput({"samples":latent}) + + +class HunyuanImageToVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="HunyuanImageToVideo_V3", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Vae.Input("vae"), + io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16), + io.Int.Input("length", default=53, min=1, max=nodes.MAX_RESOLUTION, step=4), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Combo.Input("guidance_type", options=["v1 (concat)", "v2 (replace)", "custom"]), + io.Image.Input("start_image", optional=True), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, positive, vae, width, height, length, batch_size, guidance_type, start_image=None): + latent = torch.zeros( + [batch_size, 16, ((length - 1) // 4) + 1, height // 8, width // 8], + device=comfy.model_management.intermediate_device(), + ) + out_latent = {} + + if start_image is not None: + start_image = comfy.utils.common_upscale( + start_image[:length, :, :, :3].movedim(-1, 1), width, height, "bilinear", "center" + ).movedim(1, -1) + + concat_latent_image = vae.encode(start_image) + mask = torch.ones( + (1, 1, latent.shape[2], concat_latent_image.shape[-2], 
concat_latent_image.shape[-1]), + device=start_image.device, + dtype=start_image.dtype, + ) + mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0 + + if guidance_type == "v1 (concat)": + cond = {"concat_latent_image": concat_latent_image, "concat_mask": mask} + elif guidance_type == "v2 (replace)": + cond = {'guiding_frame_index': 0} + latent[:, :, :concat_latent_image.shape[2]] = concat_latent_image + out_latent["noise_mask"] = mask + elif guidance_type == "custom": + cond = {"ref_latent": concat_latent_image} + + positive = node_helpers.conditioning_set_values(positive, cond) + + out_latent["samples"] = latent + return io.NodeOutput(positive, out_latent) + + +class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="TextEncodeHunyuanVideo_ImageToVideo_V3", + category="advanced/conditioning", + inputs=[ + io.Clip.Input("clip"), + io.ClipVisionOutput.Input("clip_vision_output"), + io.String.Input("prompt", multiline=True, dynamic_prompts=True), + io.Int.Input( + "image_interleave", + default=2, + min=1, + max=512, + tooltip="How much the image influences things vs the text prompt. Higher number means more influence from the text prompt.", + ), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) + + @classmethod + def execute(cls, clip, clip_vision_output, prompt, image_interleave): + tokens = clip.tokenize( + prompt, llama_template=PROMPT_TEMPLATE_ENCODE_VIDEO_I2V, + image_embeds=clip_vision_output.mm_projected, + image_interleave=image_interleave, + ) + return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens)) + + +NODES_LIST = [ + CLIPTextEncodeHunyuanDiT, + EmptyHunyuanLatentVideo, + HunyuanImageToVideo, + TextEncodeHunyuanVideo_ImageToVideo, +] diff --git a/comfy_extras/v3/nodes_hypernetwork.py b/comfy_extras/v3/nodes_hypernetwork.py new file mode 100644 index 000000000..907654cd1 --- /dev/null +++ b/comfy_extras/v3/nodes_hypernetwork.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +import logging + +import torch + +import comfy.utils +import folder_paths +from comfy_api.v3 import io + + +def load_hypernetwork_patch(path, strength): + sd = comfy.utils.load_torch_file(path, safe_load=True) + activation_func = sd.get('activation_func', 'linear') + is_layer_norm = sd.get('is_layer_norm', False) + use_dropout = sd.get('use_dropout', False) + activate_output = sd.get('activate_output', False) + last_layer_dropout = sd.get('last_layer_dropout', False) + + valid_activation = { + "linear": torch.nn.Identity, + "relu": torch.nn.ReLU, + "leakyrelu": torch.nn.LeakyReLU, + "elu": torch.nn.ELU, + "swish": torch.nn.Hardswish, + "tanh": torch.nn.Tanh, + "sigmoid": torch.nn.Sigmoid, + "softsign": torch.nn.Softsign, + "mish": torch.nn.Mish, + } + + logging.error( + "Unsupported Hypernetwork format, if you report it I might implement it. 
{} {} {} {} {} {}".format( + path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout + ) + ) + + out = {} + + for d in sd: + try: + dim = int(d) + except Exception: + continue + + output = [] + for index in [0, 1]: + attn_weights = sd[dim][index] + keys = attn_weights.keys() + + linears = filter(lambda a: a.endswith(".weight"), keys) + linears = list(map(lambda a: a[:-len(".weight")], linears)) + layers = [] + + i = 0 + while i < len(linears): + lin_name = linears[i] + last_layer = (i == (len(linears) - 1)) + penultimate_layer = (i == (len(linears) - 2)) + + lin_weight = attn_weights['{}.weight'.format(lin_name)] + lin_bias = attn_weights['{}.bias'.format(lin_name)] + layer = torch.nn.Linear(lin_weight.shape[1], lin_weight.shape[0]) + layer.load_state_dict({"weight": lin_weight, "bias": lin_bias}) + layers.append(layer) + if activation_func != "linear": + if (not last_layer) or (activate_output): + layers.append(valid_activation[activation_func]()) + if is_layer_norm: + i += 1 + ln_name = linears[i] + ln_weight = attn_weights['{}.weight'.format(ln_name)] + ln_bias = attn_weights['{}.bias'.format(ln_name)] + ln = torch.nn.LayerNorm(ln_weight.shape[0]) + ln.load_state_dict({"weight": ln_weight, "bias": ln_bias}) + layers.append(ln) + if use_dropout: + if (not last_layer) and (not penultimate_layer or last_layer_dropout): + layers.append(torch.nn.Dropout(p=0.3)) + i += 1 + + output.append(torch.nn.Sequential(*layers)) + out[dim] = torch.nn.ModuleList(output) + + class hypernetwork_patch: + def __init__(self, hypernet, strength): + self.hypernet = hypernet + self.strength = strength + + def __call__(self, q, k, v, extra_options): + dim = k.shape[-1] + if dim in self.hypernet: + hn = self.hypernet[dim] + k = k + hn[0](k) * self.strength + v = v + hn[1](v) * self.strength + + return q, k, v + + def to(self, device): + for d in self.hypernet.keys(): + self.hypernet[d] = self.hypernet[d].to(device) + return self + + return hypernetwork_patch(out, strength) + + +class HypernetworkLoader(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="HypernetworkLoader_V3", + category="loaders", + inputs=[ + io.Model.Input("model"), + io.Combo.Input("hypernetwork_name", options=folder_paths.get_filename_list("hypernetworks")), + io.Float.Input("strength", default=1.0, min=-10.0, max=10.0, step=0.01), + ], + outputs=[ + io.Model.Output(), + ], + ) + + @classmethod + def execute(cls, model, hypernetwork_name, strength): + hypernetwork_path = folder_paths.get_full_path_or_raise("hypernetworks", hypernetwork_name) + model_hypernetwork = model.clone() + patch = load_hypernetwork_patch(hypernetwork_path, strength) + if patch is not None: + model_hypernetwork.set_model_attn1_patch(patch) + model_hypernetwork.set_model_attn2_patch(patch) + return io.NodeOutput(model_hypernetwork) + + +NODES_LIST = [ + HypernetworkLoader, +] diff --git a/comfy_extras/v3/nodes_hypertile.py b/comfy_extras/v3/nodes_hypertile.py new file mode 100644 index 000000000..bf6ea11ce --- /dev/null +++ b/comfy_extras/v3/nodes_hypertile.py @@ -0,0 +1,95 @@ +"""Taken from: https://github.com/tfernd/HyperTile/""" + +from __future__ import annotations + +import math + +from einops import rearrange +from torch import randint + +from comfy_api.v3 import io + + +def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: + min_value = min(min_value, value) + + # All big divisors of value (inclusive) + divisors = [i for i in range(min_value, value + 1) if value % i == 0] + 
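+    # Every divisor i collected above is a candidate chunk size >= min_value, and value // i
+    # is the matching chunk count. Only the first `max_options` candidates are kept, and one
+    # of them is chosen at random below so the tiling varies between calls (HyperTile passes
+    # swap_size as max_options for exactly this reason).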
+ ns = [value // i for i in divisors[:max_options]] # has at least 1 element + + if len(ns) - 1 > 0: + idx = randint(low=0, high=len(ns) - 1, size=(1,)).item() + else: + idx = 0 + + return ns[idx] + + +class HyperTile(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="HyperTile_V3", + category="model_patches/unet", + inputs=[ + io.Model.Input(id="model"), + io.Int.Input(id="tile_size", default=256, min=1, max=2048), + io.Int.Input(id="swap_size", default=2, min=1, max=128), + io.Int.Input(id="max_depth", default=0, min=0, max=10), + io.Boolean.Input(id="scale_depth", default=False), + ], + outputs=[ + io.Model.Output(), + ], + ) + + @classmethod + def execute(cls, model, tile_size, swap_size, max_depth, scale_depth): + latent_tile_size = max(32, tile_size) // 8 + temp = None + + def hypertile_in(q, k, v, extra_options): + nonlocal temp + model_chans = q.shape[-2] + orig_shape = extra_options['original_shape'] + apply_to = [] + for i in range(max_depth + 1): + apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i))) + + if model_chans in apply_to: + shape = extra_options["original_shape"] + aspect_ratio = shape[-1] / shape[-2] + + hw = q.size(1) + h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) + + factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1 + nh = random_divisor(h, latent_tile_size * factor, swap_size) + nw = random_divisor(w, latent_tile_size * factor, swap_size) + + if nh * nw > 1: + q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + temp = (nh, nw, h, w) + return q, k, v + + return q, k, v + + def hypertile_out(out, extra_options): + nonlocal temp + if temp is not None: + nh, nw, h, w = temp + temp = None + out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) + out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + return out + + m = model.clone() + m.set_model_attn1_patch(hypertile_in) + m.set_model_attn1_output_patch(hypertile_out) + return io.NodeOutput(m) + + +NODES_LIST = [ + HyperTile, +] diff --git a/nodes.py b/nodes.py index 17367c94e..b1224d33f 100644 --- a/nodes.py +++ b/nodes.py @@ -2320,6 +2320,9 @@ def init_builtin_extra_nodes(): "v3/nodes_fresca.py", "v3/nodes_gits.py", "v3/nodes_hidream.py", + "v3/nodes_hunyuan.py", + "v3/nodes_hypernetwork.py", + "v3/nodes_hypertile.py", "v3/nodes_images.py", "v3/nodes_ip2p.py", "v3/nodes_latent.py", From 487ec28b9cd092c670511c43d83af30b89cd6109 Mon Sep 17 00:00:00 2001 From: bigcat88 Date: Thu, 24 Jul 2025 11:36:42 +0300 Subject: [PATCH 3/7] converted last nodes for "u" and "v" letters --- comfy_extras/v3/nodes_upscale_model.py | 106 +++++++++++ comfy_extras/v3/nodes_video_model.py | 232 +++++++++++++++++++++++++ nodes.py | 2 + 3 files changed, 340 insertions(+) create mode 100644 comfy_extras/v3/nodes_upscale_model.py create mode 100644 comfy_extras/v3/nodes_video_model.py diff --git a/comfy_extras/v3/nodes_upscale_model.py b/comfy_extras/v3/nodes_upscale_model.py new file mode 100644 index 000000000..a3337f9ad --- /dev/null +++ b/comfy_extras/v3/nodes_upscale_model.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +import logging + +import torch +from spandrel import ImageModelDescriptor, ModelLoader + +import comfy.utils +import folder_paths +from comfy import model_management +from comfy_api.v3 import io + +try: + from spandrel import MAIN_REGISTRY + from spandrel_extra_arches import EXTRA_REGISTRY + 
MAIN_REGISTRY.add(*EXTRA_REGISTRY) + logging.info("Successfully imported spandrel_extra_arches: support for non commercial upscale models.") +except Exception: + pass + + +class ImageUpscaleWithModel(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ImageUpscaleWithModel_V3", + display_name="Upscale Image (using Model) _V3", + category="image/upscaling", + inputs=[ + io.UpscaleModel.Input("upscale_model"), + io.Image.Input("image"), + ], + outputs=[ + io.Image.Output(), + ], + ) + + @classmethod + def execute(cls, upscale_model, image): + device = model_management.get_torch_device() + + memory_required = model_management.module_size(upscale_model.model) + memory_required += (512 * 512 * 3) * image.element_size() * max(upscale_model.scale, 1.0) * 384.0 #The 384.0 is an estimate of how much some of these models take, TODO: make it more accurate + memory_required += image.nelement() * image.element_size() + model_management.free_memory(memory_required, device) + + upscale_model.to(device) + in_img = image.movedim(-1,-3).to(device) + + tile = 512 + overlap = 32 + + oom = True + while oom: + try: + steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps( + in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap + ) + pbar = comfy.utils.ProgressBar(steps) + s = comfy.utils.tiled_scale( + in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar + ) + oom = False + except model_management.OOM_EXCEPTION as e: + tile //= 2 + if tile < 128: + raise e + + upscale_model.to("cpu") + s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0) + return io.NodeOutput(s) + + +class UpscaleModelLoader(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="UpscaleModelLoader_V3", + display_name="Load Upscale Model _V3", + category="loaders", + inputs=[ + io.Combo.Input("model_name", options=folder_paths.get_filename_list("upscale_models")), + ], + outputs=[ + io.UpscaleModel.Output(), + ], + ) + + @classmethod + def execute(cls, model_name): + model_path = folder_paths.get_full_path_or_raise("upscale_models", model_name) + sd = comfy.utils.load_torch_file(model_path, safe_load=True) + if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd: + sd = comfy.utils.state_dict_prefix_replace(sd, {"module.":""}) + out = ModelLoader().load_from_state_dict(sd).eval() + + if not isinstance(out, ImageModelDescriptor): + raise Exception("Upscale model must be a single-image model.") + + return io.NodeOutput(out) + + +NODES_LIST = [ + ImageUpscaleWithModel, + UpscaleModelLoader, +] diff --git a/comfy_extras/v3/nodes_video_model.py b/comfy_extras/v3/nodes_video_model.py new file mode 100644 index 000000000..3c17eb918 --- /dev/null +++ b/comfy_extras/v3/nodes_video_model.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +import torch + +import comfy.sd +import comfy.utils +import comfy_extras.nodes_model_merging +import folder_paths +import node_helpers +import nodes +from comfy_api.v3 import io + + +class ConditioningSetAreaPercentageVideo(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ConditioningSetAreaPercentageVideo_V3", + category="conditioning", + inputs=[ + io.Conditioning.Input("conditioning"), + io.Float.Input("width", default=1.0, min=0, max=1.0, step=0.01), + io.Float.Input("height", default=1.0, min=0, max=1.0, step=0.01), + io.Float.Input("temporal", default=1.0, min=0, max=1.0, step=0.01), + 
io.Float.Input("x", default=0, min=0, max=1.0, step=0.01), + io.Float.Input("y", default=0, min=0, max=1.0, step=0.01), + io.Float.Input("z", default=0, min=0, max=1.0, step=0.01), + io.Float.Input("strength", default=1.0, min=0.0, max=10.0, step=0.01), + ], + outputs=[ + io.Conditioning.Output(), + ], + ) + + @classmethod + def execute(cls, conditioning, width, height, temporal, x, y, z, strength): + c = node_helpers.conditioning_set_values( + conditioning, + { + "area": ("percentage", temporal, height, width, z, y, x), + "strength": strength, + "set_area_to_bounds": False + ,} + ) + return io.NodeOutput(c) + + +class ImageOnlyCheckpointLoader(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ImageOnlyCheckpointLoader_V3", + display_name="Image Only Checkpoint Loader (img2vid model) _V3", + category="loaders/video_models", + inputs=[ + io.Combo.Input("ckpt_name", options=folder_paths.get_filename_list("checkpoints")), + ], + outputs=[ + io.Model.Output(), + io.ClipVision.Output(), + io.Vae.Output(), + ], + ) + + @classmethod + def execute(cls, ckpt_name): + ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config( + ckpt_path, + output_vae=True, + output_clip=False, + output_clipvision=True, + embedding_directory=folder_paths.get_folder_paths("embeddings"), + ) + return io.NodeOutput(out[0], out[3], out[2]) + + +class ImageOnlyCheckpointSave(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ImageOnlyCheckpointSave_V3", + category="advanced/model_merging", + inputs=[ + io.Model.Input("model"), + io.ClipVision.Input("clip_vision"), + io.Vae.Input("vae"), + io.String.Input("filename_prefix", default="checkpoints/ComfyUI"), + ], + outputs=[], + hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo], + ) + + @classmethod + def execute(cls, model, clip_vision, vae, filename_prefix): + output_dir = folder_paths.get_output_directory() + comfy_extras.nodes_model_merging.save_checkpoint( + model, + clip_vision=clip_vision, + vae=vae, + filename_prefix=filename_prefix, + output_dir=output_dir, + prompt=cls.hidden.prompt, + extra_pnginfo=cls.hidden.extra_pnginfo, + ) + return io.NodeOutput() + + +class SVD_img2vid_Conditioning(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SVD_img2vid_Conditioning_V3", + category="conditioning/video_models", + inputs=[ + io.ClipVision.Input("clip_vision"), + io.Image.Input("init_image"), + io.Vae.Input("vae"), + io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=576, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("video_frames", default=14, min=1, max=4096), + io.Int.Input("motion_bucket_id", default=127, min=1, max=1023), + io.Int.Input("fps", default=6, min=1, max=1024), + io.Float.Input("augmentation_level", default=0.0, min=0.0, max=10.0, step=0.01), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, clip_vision, init_image, vae, width, height, video_frames, motion_bucket_id, fps, augmentation_level): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = comfy.utils.common_upscale( + init_image.movedim(-1,1), width, height, "bilinear", "center" + ).movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + if augmentation_level > 
0: + encode_pixels += torch.randn_like(pixels) * augmentation_level + t = vae.encode(encode_pixels) + positive = [ + [ + pooled, + {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": t}, + ] + ] + negative = [ + [ + torch.zeros_like(pooled), + {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": torch.zeros_like(t)}, + ] + ] + latent = torch.zeros([video_frames, 4, height // 8, width // 8]) + return io.NodeOutput(positive, negative, {"samples":latent}) + + +class VideoLinearCFGGuidance(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VideoLinearCFGGuidance_V3", + category="sampling/video_models", + inputs=[ + io.Model.Input("model"), + io.Float.Input("min_cfg", default=1.0, min=0.0, max=100.0, step=0.5, round=0.01), + ], + outputs=[ + io.Model.Output(), + ], + ) + + @classmethod + def execute(cls, model, min_cfg): + def linear_cfg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + + scale = torch.linspace( + min_cfg, cond_scale, cond.shape[0], device=cond.device + ).reshape((cond.shape[0], 1, 1, 1)) + return uncond + scale * (cond - uncond) + + m = model.clone() + m.set_model_sampler_cfg_function(linear_cfg) + return io.NodeOutput(m) + + +class VideoTriangleCFGGuidance(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VideoTriangleCFGGuidance_V3", + category="sampling/video_models", + inputs=[ + io.Model.Input("model"), + io.Float.Input("min_cfg", default=1.0, min=0.0, max=100.0, step=0.5, round=0.01), + ], + outputs=[ + io.Model.Output(), + ], + ) + + @classmethod + def execute(cls, model, min_cfg): + def linear_cfg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + period = 1.0 + values = torch.linspace(0, 1, cond.shape[0], device=cond.device) + values = 2 * (values / period - torch.floor(values / period + 0.5)).abs() + scale = (values * (cond_scale - min_cfg) + min_cfg).reshape((cond.shape[0], 1, 1, 1)) + + return uncond + scale * (cond - uncond) + + m = model.clone() + m.set_model_sampler_cfg_function(linear_cfg) + return io.NodeOutput(m) + + +NODES_LIST = [ + ConditioningSetAreaPercentageVideo, + ImageOnlyCheckpointLoader, + ImageOnlyCheckpointSave, + SVD_img2vid_Conditioning, + VideoLinearCFGGuidance, + VideoTriangleCFGGuidance, +] diff --git a/nodes.py b/nodes.py index b1224d33f..296aa0027 100644 --- a/nodes.py +++ b/nodes.py @@ -2350,7 +2350,9 @@ def init_builtin_extra_nodes(): "v3/nodes_sdupscale.py", "v3/nodes_slg.py", "v3/nodes_stable_cascade.py", + "v3/nodes_upscale_model.py", "v3/nodes_video.py", + "v3/nodes_video_model.py", "v3/nodes_wan.py", "v3/nodes_webcam.py", ] From 2ea2bc2941321317de3cb5f4365463599d95dfe5 Mon Sep 17 00:00:00 2001 From: bigcat88 Date: Thu, 24 Jul 2025 15:22:35 +0300 Subject: [PATCH 4/7] converted nodes files starting with "t" letter --- comfy_extras/v3/nodes_tcfg.py | 70 +++ comfy_extras/v3/nodes_tomesd.py | 190 +++++++ comfy_extras/v3/nodes_torch_compile.py | 32 ++ comfy_extras/v3/nodes_train.py | 658 +++++++++++++++++++++++++ nodes.py | 4 + 5 files changed, 954 insertions(+) create mode 100644 comfy_extras/v3/nodes_tcfg.py create mode 100644 comfy_extras/v3/nodes_tomesd.py create mode 100644 comfy_extras/v3/nodes_torch_compile.py create mode 100644 comfy_extras/v3/nodes_train.py diff --git a/comfy_extras/v3/nodes_tcfg.py b/comfy_extras/v3/nodes_tcfg.py new file mode 100644 
index 000000000..62c6100d6 --- /dev/null +++ b/comfy_extras/v3/nodes_tcfg.py @@ -0,0 +1,70 @@ +"""TCFG: Tangential Damping Classifier-free Guidance - (arXiv: https://arxiv.org/abs/2503.18137)""" + +from __future__ import annotations + +import torch + +from comfy_api.v3 import io + + +def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tensor) -> torch.Tensor: + """Drop tangential components from uncond score to align with cond score.""" + # (B, 1, ...) + batch_num = cond_score.shape[0] + cond_score_flat = cond_score.reshape(batch_num, 1, -1).float() + uncond_score_flat = uncond_score.reshape(batch_num, 1, -1).float() + + # Score matrix A (B, 2, ...) + score_matrix = torch.cat((uncond_score_flat, cond_score_flat), dim=1) + try: + _, _, Vh = torch.linalg.svd(score_matrix, full_matrices=False) + except RuntimeError: + # Fallback to CPU + _, _, Vh = torch.linalg.svd(score_matrix.cpu(), full_matrices=False) + + # Drop the tangential components + v1 = Vh[:, 0:1, :].to(uncond_score_flat.device) # (B, 1, ...) + uncond_score_td = (uncond_score_flat @ v1.transpose(-2, -1)) * v1 + return uncond_score_td.reshape_as(uncond_score).to(uncond_score.dtype) + + +class TCFG(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="TCFG_V3", + display_name="Tangential Damping CFG _V3", + category="advanced/guidance", + description="TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality.", + inputs=[ + io.Model.Input("model"), + ], + outputs=[ + io.Model.Output(display_name="patched_model"), + ], + ) + + @classmethod + def execute(cls, model): + m = model.clone() + + def tangential_damping_cfg(args): + # Assume [cond, uncond, ...] + x = args["input"] + conds_out = args["conds_out"] + if len(conds_out) <= 1 or None in args["conds"][:2]: + # Skip when either cond or uncond is None + return conds_out + cond_pred = conds_out[0] + uncond_pred = conds_out[1] + uncond_td = score_tangential_damping(x - cond_pred, x - uncond_pred) + uncond_pred_td = x - uncond_td + return [cond_pred, uncond_pred_td] + conds_out[2:] + + m.set_model_sampler_pre_cfg_function(tangential_damping_cfg) + return io.NodeOutput(m) + + +NODES_LIST = [ + TCFG, +] diff --git a/comfy_extras/v3/nodes_tomesd.py b/comfy_extras/v3/nodes_tomesd.py new file mode 100644 index 000000000..5032b1482 --- /dev/null +++ b/comfy_extras/v3/nodes_tomesd.py @@ -0,0 +1,190 @@ +"""Taken from: https://github.com/dbolya/tomesd""" + +from __future__ import annotations + +import math +from typing import Callable, Tuple + +import torch + +from comfy_api.v3 import io + + +def do_nothing(x: torch.Tensor, mode:str=None): + return x + + +def mps_gather_workaround(input, dim, index): + if input.shape[-1] == 1: + return torch.gather( + input.unsqueeze(-1), + dim - 1 if dim < 0 else dim, + index.unsqueeze(-1) + ).squeeze(-1) + return torch.gather(input, dim, index) + + +def bipartite_soft_matching_random2d( + metric: torch.Tensor,w: int, h: int, sx: int, sy: int, r: int, no_rand: bool = False +) -> Tuple[Callable, Callable]: + """ + Partitions the tokens into src and dst and merges r tokens from src to dst. + Dst tokens are partitioned by choosing one randomy in each (sx, sy) region. 
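+    Returns a (merge, unmerge) pair of callables: merge() folds the r most similar src
+    tokens into their matched dst tokens (mean-reduced by default), and unmerge() scatters
+    the merged values back out to the original [B, N, C] token layout.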
+ Args: + - metric [B, N, C]: metric to use for similarity + - w: image width in tokens + - h: image height in tokens + - sx: stride in the x dimension for dst, must divide w + - sy: stride in the y dimension for dst, must divide h + - r: number of tokens to remove (by merging) + - no_rand: if true, disable randomness (use top left corner only) + """ + B, N, _ = metric.shape + + if r <= 0 or w == 1 or h == 1: + return do_nothing, do_nothing + + gather = mps_gather_workaround if metric.device.type == "mps" else torch.gather + + with torch.no_grad(): + hsy, wsx = h // sy, w // sx + + # For each sy by sx kernel, randomly assign one token to be dst and the rest src + if no_rand: + rand_idx = torch.zeros(hsy, wsx, 1, device=metric.device, dtype=torch.int64) + else: + rand_idx = torch.randint(sy*sx, size=(hsy, wsx, 1), device=metric.device) + + # The image might not divide sx and sy, so we need to work on a view of the top left if the idx buffer instead + idx_buffer_view = torch.zeros(hsy, wsx, sy*sx, device=metric.device, dtype=torch.int64) + idx_buffer_view.scatter_(dim=2, index=rand_idx, src=-torch.ones_like(rand_idx, dtype=rand_idx.dtype)) + idx_buffer_view = idx_buffer_view.view(hsy, wsx, sy, sx).transpose(1, 2).reshape(hsy * sy, wsx * sx) + + # Image is not divisible by sx or sy so we need to move it into a new buffer + if (hsy * sy) < h or (wsx * sx) < w: + idx_buffer = torch.zeros(h, w, device=metric.device, dtype=torch.int64) + idx_buffer[:(hsy * sy), :(wsx * sx)] = idx_buffer_view + else: + idx_buffer = idx_buffer_view + + # We set dst tokens to be -1 and src to be 0, so an argsort gives us dst|src indices + rand_idx = idx_buffer.reshape(1, -1, 1).argsort(dim=1) + + # We're finished with these + del idx_buffer, idx_buffer_view + + # rand_idx is currently dst|src, so split them + num_dst = hsy * wsx + a_idx = rand_idx[:, num_dst:, :] # src + b_idx = rand_idx[:, :num_dst, :] # dst + + def split(x): + C = x.shape[-1] + src = gather(x, dim=1, index=a_idx.expand(B, N - num_dst, C)) + dst = gather(x, dim=1, index=b_idx.expand(B, num_dst, C)) + return src, dst + + # Cosine similarity between A and B + metric = metric / metric.norm(dim=-1, keepdim=True) + a, b = split(metric) + scores = a @ b.transpose(-1, -2) + + # Can't reduce more than the # tokens in src + r = min(a.shape[1], r) + + # Find the most similar greedily + node_max, node_idx = scores.max(dim=-1) + edge_idx = node_max.argsort(dim=-1, descending=True)[..., None] + + unm_idx = edge_idx[..., r:, :] # Unmerged Tokens + src_idx = edge_idx[..., :r, :] # Merged Tokens + dst_idx = gather(node_idx[..., None], dim=-2, index=src_idx) + + def merge(x: torch.Tensor, mode="mean") -> torch.Tensor: + src, dst = split(x) + n, t1, c = src.shape + + unm = gather(src, dim=-2, index=unm_idx.expand(n, t1 - r, c)) + src = gather(src, dim=-2, index=src_idx.expand(n, r, c)) + dst = dst.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce=mode) + + return torch.cat([unm, dst], dim=1) + + def unmerge(x: torch.Tensor) -> torch.Tensor: + unm_len = unm_idx.shape[1] + unm, dst = x[..., :unm_len, :], x[..., unm_len:, :] + _, _, c = unm.shape + + src = gather(dst, dim=-2, index=dst_idx.expand(B, r, c)) + + # Combine back to the original shape + out = torch.zeros(B, N, c, device=x.device, dtype=x.dtype) + out.scatter_(dim=-2, index=b_idx.expand(B, num_dst, c), src=dst) + out.scatter_(dim=-2, index=gather(a_idx.expand(B, a_idx.shape[1], 1), dim=1, index=unm_idx).expand(B, unm_len, c), src=unm) + out.scatter_(dim=-2, index=gather(a_idx.expand(B, a_idx.shape[1], 
1), dim=1, index=src_idx).expand(B, r, c), src=src) + + return out + + return merge, unmerge + + +def get_functions(x, ratio, original_shape): + b, c, original_h, original_w = original_shape + original_tokens = original_h * original_w + downsample = int(math.ceil(math.sqrt(original_tokens // x.shape[1]))) + stride_x = 2 + stride_y = 2 + max_downsample = 1 + + if downsample <= max_downsample: + w = int(math.ceil(original_w / downsample)) + h = int(math.ceil(original_h / downsample)) + r = int(x.shape[1] * ratio) + no_rand = False + m, u = bipartite_soft_matching_random2d(x, w, h, stride_x, stride_y, r, no_rand) + return m, u + + def nothing(y): + return y + + return nothing, nothing + + +class TomePatchModel(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="TomePatchModel_V3", + category="model_patches/unet", + inputs=[ + io.Model.Input("model"), + io.Float.Input("ratio", default=0.3, min=0.0, max=1.0, step=0.01), + ], + outputs=[ + io.Model.Output(), + ], + ) + + @classmethod + def execute(cls, model, ratio): + u = None + + def tomesd_m(q, k, v, extra_options): + nonlocal u + #NOTE: In the reference code get_functions takes x (input of the transformer block) as the argument instead of q + #however from my basic testing it seems that using q instead gives better results + m, u = get_functions(q, ratio, extra_options["original_shape"]) + return m(q), k, v + + def tomesd_u(n, extra_options): + return u(n) + + m = model.clone() + m.set_model_attn1_patch(tomesd_m) + m.set_model_attn1_output_patch(tomesd_u) + return io.NodeOutput(m) + + +NODES_LIST = [ + TomePatchModel, +] diff --git a/comfy_extras/v3/nodes_torch_compile.py b/comfy_extras/v3/nodes_torch_compile.py new file mode 100644 index 000000000..528de0e86 --- /dev/null +++ b/comfy_extras/v3/nodes_torch_compile.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from comfy_api.torch_helpers import set_torch_compile_wrapper +from comfy_api.v3 import io + + +class TorchCompileModel(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="TorchCompileModel_V3", + category="_for_testing", + is_experimental=True, + inputs=[ + io.Model.Input("model"), + io.Combo.Input("backend", options=["inductor", "cudagraphs"]), + ], + outputs=[ + io.Model.Output(), + ], + ) + + @classmethod + def execute(cls, model, backend): + m = model.clone() + set_torch_compile_wrapper(model=m, backend=backend) + return io.NodeOutput(m) + + +NODES_LIST = [ + TorchCompileModel, +] diff --git a/comfy_extras/v3/nodes_train.py b/comfy_extras/v3/nodes_train.py new file mode 100644 index 000000000..1c9290bbf --- /dev/null +++ b/comfy_extras/v3/nodes_train.py @@ -0,0 +1,658 @@ +from __future__ import annotations + +import logging +import os + +import numpy as np +import safetensors +import torch +import torch.utils.checkpoint +import tqdm +from PIL import Image, ImageDraw, ImageFont + +import comfy.model_management +import comfy.samplers +import comfy.sd +import comfy.utils +import comfy_extras.nodes_custom_sampler +import folder_paths +import node_helpers +from comfy.weight_adapter import adapters +from comfy_api.v3 import io, ui + + +def make_batch_extra_option_dict(d, indicies, full_size=None): + new_dict = {} + for k, v in d.items(): + newv = v + if isinstance(v, dict): + newv = make_batch_extra_option_dict(v, indicies, full_size=full_size) + elif isinstance(v, torch.Tensor): + if full_size is None or v.size(0) == full_size: + newv = v[indicies] + elif isinstance(v, (list, tuple)) and len(v) == 
full_size: + newv = [v[i] for i in indicies] + new_dict[k] = newv + return new_dict + + +class TrainSampler(comfy.samplers.Sampler): + + def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, total_steps=1, seed=0, training_dtype=torch.bfloat16): + self.loss_fn = loss_fn + self.optimizer = optimizer + self.loss_callback = loss_callback + self.batch_size = batch_size + self.total_steps = total_steps + self.seed = seed + self.training_dtype = training_dtype + + def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + cond = model_wrap.conds["positive"] + dataset_size = sigmas.size(0) + torch.cuda.empty_cache() + for i in (pbar:=tqdm.trange(self.total_steps, desc="Training LoRA", smoothing=0.01, disable=not comfy.utils.PROGRESS_BAR_ENABLED)): + noisegen = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(self.seed + i * 1000) + indicies = torch.randperm(dataset_size)[:self.batch_size].tolist() + + batch_latent = torch.stack([latent_image[i] for i in indicies]) + batch_noise = noisegen.generate_noise({"samples": batch_latent}).to(batch_latent.device) + batch_sigmas = [ + model_wrap.inner_model.model_sampling.percent_to_sigma( + torch.rand((1,)).item() + ) for _ in range(min(self.batch_size, dataset_size)) + ] + batch_sigmas = torch.tensor(batch_sigmas).to(batch_latent.device) + + xt = model_wrap.inner_model.model_sampling.noise_scaling( + batch_sigmas, + batch_noise, + batch_latent, + False + ) + x0 = model_wrap.inner_model.model_sampling.noise_scaling( + torch.zeros_like(batch_sigmas), + torch.zeros_like(batch_noise), + batch_latent, + False + ) + + model_wrap.conds["positive"] = [ + cond[i] for i in indicies + ] + batch_extra_args = make_batch_extra_option_dict(extra_args, indicies, full_size=dataset_size) + + with torch.autocast(xt.device.type, dtype=self.training_dtype): + x0_pred = model_wrap(xt, batch_sigmas, **batch_extra_args) + loss = self.loss_fn(x0_pred, x0) + loss.backward() + if self.loss_callback: + self.loss_callback(loss.item()) + pbar.set_postfix({"loss": f"{loss.item():.4f}"}) + + self.optimizer.step() + self.optimizer.zero_grad() + torch.cuda.empty_cache() + return torch.zeros_like(latent_image) + + +class BiasDiff(torch.nn.Module): + def __init__(self, bias): + super().__init__() + self.bias = bias + + def __call__(self, b): + org_dtype = b.dtype + return (b.to(self.bias) + self.bias).to(org_dtype) + + def passive_memory_usage(self): + return self.bias.nelement() * self.bias.element_size() + + def move_to(self, device): + self.to(device=device) + return self.passive_memory_usage() + + +def load_and_process_images(image_files, input_dir, resize_method="None", w=None, h=None): + """Utility function to load and process a list of images. 
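+    Every image is converted to RGB, resized or cropped (per resize_method) to match the
+    first image (or the explicit w/h), scaled to [0, 1] float32 and stacked into one
+    [N, H, W, C] tensor.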
+ + Args: + image_files: List of image filenames + input_dir: Base directory containing the images + resize_method: How to handle images of different sizes ("None", "Stretch", "Crop", "Pad") + + Returns: + torch.Tensor: Batch of processed images + """ + if not image_files: + raise ValueError("No valid images found in input") + + output_images = [] + + for file in image_files: + image_path = os.path.join(input_dir, file) + img = node_helpers.pillow(Image.open, image_path) + + if img.mode == "I": + img = img.point(lambda i: i * (1 / 255)) + img = img.convert("RGB") + + if w is None and h is None: + w, h = img.size[0], img.size[1] + + # Resize image to first image + if img.size[0] != w or img.size[1] != h: + if resize_method == "Stretch": + img = img.resize((w, h), Image.Resampling.LANCZOS) + elif resize_method == "Crop": + img = img.crop((0, 0, w, h)) + elif resize_method == "Pad": + img = img.resize((w, h), Image.Resampling.LANCZOS) + elif resize_method == "None": + raise ValueError( + "Your input image size does not match the first image in the dataset. Either select a valid resize method or use the same size for all images." + ) + + img_array = np.array(img).astype(np.float32) / 255.0 + img_tensor = torch.from_numpy(img_array)[None,] + output_images.append(img_tensor) + + return torch.cat(output_images, dim=0) + + +def draw_loss_graph(loss_map, steps): + width, height = 500, 300 + img = Image.new("RGB", (width, height), "white") + draw = ImageDraw.Draw(img) + + min_loss, max_loss = min(loss_map.values()), max(loss_map.values()) + scaled_loss = [(l_v - min_loss) / (max_loss - min_loss) for l_v in loss_map.values()] + + prev_point = (0, height - int(scaled_loss[0] * height)) + for i, l_v in enumerate(scaled_loss[1:], start=1): + x = int(i / (steps - 1) * width) + y = height - int(l_v * height) + draw.line([prev_point, (x, y)], fill="blue", width=2) + prev_point = (x, y) + + return img + + +def find_all_highest_child_module_with_forward(model: torch.nn.Module, result = None, name = None): + if result is None: + result = [] + elif hasattr(model, "forward") and not isinstance(model, (torch.nn.ModuleList, torch.nn.Sequential, torch.nn.ModuleDict)): + result.append(model) + logging.debug(f"Found module with forward: {name} ({model.__class__.__name__})") + return result + name = name or "root" + for next_name, child in model.named_children(): + find_all_highest_child_module_with_forward(child, result, f"{name}.{next_name}") + return result + + +def patch(m): + if not hasattr(m, "forward"): + return + org_forward = m.forward + def fwd(args, kwargs): + return org_forward(*args, **kwargs) + def checkpointing_fwd(*args, **kwargs): + return torch.utils.checkpoint.checkpoint( + fwd, args, kwargs, use_reentrant=False + ) + m.org_forward = org_forward + m.forward = checkpointing_fwd + + +def unpatch(m): + if hasattr(m, "org_forward"): + m.forward = m.org_forward + del m.org_forward + + +class LoadImageSetFromFolderNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LoadImageSetFromFolderNode_V3", + display_name="Load Image Dataset from Folder _V3", + category="loaders", + description="Loads a batch of images from a directory for training.", + is_experimental=True, + inputs=[ + io.Combo.Input( + "folder", options=folder_paths.get_input_subfolders(), tooltip="The folder to load images from." 
+ ), + io.Combo.Input( + "resize_method", options=["None", "Stretch", "Crop", "Pad"], default="None", optional=True + ), + ], + outputs=[ + io.Image.Output(), + ], + ) + + @classmethod + def execute(cls, folder, resize_method="None"): + sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) + valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] + image_files = [ + f + for f in os.listdir(sub_input_dir) + if any(f.lower().endswith(ext) for ext in valid_extensions) + ] + return io.NodeOutput(load_and_process_images(image_files, sub_input_dir, resize_method)) + + +class LoadImageTextSetFromFolderNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LoadImageTextSetFromFolderNode_V3", + display_name="Load Image and Text Dataset from Folder _V3", + category="loaders", + description="Loads a batch of images and caption from a directory for training.", + is_experimental=True, + inputs=[ + io.Combo.Input("folder", options=folder_paths.get_input_subfolders(), tooltip="The folder to load images from."), + io.Clip.Input("clip", tooltip="The CLIP model used for encoding the text."), + io.Combo.Input("resize_method", options=["None", "Stretch", "Crop", "Pad"], default="None", optional=True), + io.Int.Input("width", default=-1, min=-1, max=10000, step=1, tooltip="The width to resize the images to. -1 means use the original width.", optional=True), + io.Int.Input("height", default=-1, min=-1, max=10000, step=1, tooltip="The height to resize the images to. -1 means use the original height.", optional=True), + ], + outputs=[ + io.Image.Output(), + io.Conditioning.Output(), + ], + ) + + @classmethod + def execute(cls, folder, clip, resize_method="None", width=None, height=None): + if clip is None: + raise RuntimeError( + "ERROR: clip input is invalid: None\n\n" + "If the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model." 
+ ) + + logging.info(f"Loading images from folder: {folder}") + + sub_input_dir = os.path.join(folder_paths.get_input_directory(), folder) + valid_extensions = [".png", ".jpg", ".jpeg", ".webp"] + + image_files = [] + for item in os.listdir(sub_input_dir): + path = os.path.join(sub_input_dir, item) + if any(item.lower().endswith(ext) for ext in valid_extensions): + image_files.append(path) + elif os.path.isdir(path): + # Support kohya-ss/sd-scripts folder structure + repeat = 1 + if item.split("_")[0].isdigit(): + repeat = int(item.split("_")[0]) + image_files.extend([ + os.path.join(path, f) for f in os.listdir(path) if any(f.lower().endswith(ext) for ext in valid_extensions) + ] * repeat) + + caption_file_path = [ + f.replace(os.path.splitext(f)[1], ".txt") + for f in image_files + ] + captions = [] + for caption_file in caption_file_path: + caption_path = os.path.join(sub_input_dir, caption_file) + if os.path.exists(caption_path): + with open(caption_path, "r", encoding="utf-8") as f: + caption = f.read().strip() + captions.append(caption) + else: + captions.append("") + + width = width if width != -1 else None + height = height if height != -1 else None + output_tensor = load_and_process_images(image_files, sub_input_dir, resize_method, width, height) + + logging.info(f"Loaded {len(output_tensor)} images from {sub_input_dir}.") + + logging.info(f"Encoding captions from {sub_input_dir}.") + conditions = [] + empty_cond = clip.encode_from_tokens_scheduled(clip.tokenize("")) + for text in captions: + if text == "": + conditions.append(empty_cond) + tokens = clip.tokenize(text) + conditions.extend(clip.encode_from_tokens_scheduled(tokens)) + logging.info(f"Encoded {len(conditions)} captions from {sub_input_dir}.") + return io.NodeOutput(output_tensor, conditions) + + +class LoraModelLoader(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LoraModelLoader_V3", + display_name="Load LoRA Model _V3", + category="loaders", + description="Load Trained LoRA weights from Train LoRA node.", + is_experimental=True, + inputs=[ + io.Model.Input("model", tooltip="The diffusion model the LoRA will be applied to."), + io.LoraModel.Input("lora", tooltip="The LoRA model to apply to the diffusion model."), + io.Float.Input("strength_model", default=1.0, min=-100.0, max=100.0, step=0.01, tooltip="How strongly to modify the diffusion model. 
This value can be negative."), + ], + outputs=[ + io.Model.Output(tooltip="The modified diffusion model."), + ], + ) + + @classmethod + def execute(cls, model, lora, strength_model): + if strength_model == 0: + return io.NodeOutput(model) + + model_lora, _ = comfy.sd.load_lora_for_models(model, None, lora, strength_model, 0) + return io.NodeOutput(model_lora) + + +class LossGraphNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="LossGraphNode_V3", + display_name="Plot Loss Graph _V3", + category="training", + description="Plots the loss graph and saves it to the output directory.", + is_experimental=True, + is_output_node=True, + inputs=[ + io.LossMap.Input("loss"), # TODO: original V1 node has also `default={}` parameter + io.String.Input("filename_prefix", default="loss_graph"), + ], + outputs=[], + hidden=[io.Hidden.prompt, io.Hidden.extra_pnginfo], + ) + + @classmethod + def execute(cls, loss, filename_prefix): + loss_values = loss["loss"] + width, height = 800, 480 + margin = 40 + + img = Image.new( + "RGB", (width + margin, height + margin), "white" + ) # Extend canvas + draw = ImageDraw.Draw(img) + + min_loss, max_loss = min(loss_values), max(loss_values) + scaled_loss = [(l_v - min_loss) / (max_loss - min_loss) for l_v in loss_values] + + steps = len(loss_values) + + prev_point = (margin, height - int(scaled_loss[0] * height)) + for i, l_v in enumerate(scaled_loss[1:], start=1): + x = margin + int(i / steps * width) # Scale X properly + y = height - int(l_v * height) + draw.line([prev_point, (x, y)], fill="blue", width=2) + prev_point = (x, y) + + draw.line([(margin, 0), (margin, height)], fill="black", width=2) # Y-axis + draw.line( + [(margin, height), (width + margin, height)], fill="black", width=2 + ) # X-axis + + try: + font = ImageFont.truetype("arial.ttf", 12) + except IOError: + font = ImageFont.load_default() + + # Add axis labels + draw.text((5, height // 2), "Loss", font=font, fill="black") + draw.text((width // 2, height + 10), "Steps", font=font, fill="black") + + # Add min/max loss values + draw.text((margin - 30, 0), f"{max_loss:.2f}", font=font, fill="black") + draw.text( + (margin - 30, height - 10), f"{min_loss:.2f}", font=font, fill="black" + ) + return io.NodeOutput(ui=ui.PreviewImage(img, cls=cls)) + + +class SaveLoRA(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="SaveLoRA_V3", + display_name="Save LoRA Weights _V3", + category="loaders", + is_experimental=True, + is_output_node=True, + inputs=[ + io.LoraModel.Input("lora", tooltip="The LoRA model to save. 
Do not use the model with LoRA layers."), + io.String.Input("prefix", default="loras/ComfyUI_trained_lora", tooltip="The prefix to use for the saved LoRA file."), + io.Int.Input("steps", tooltip="Optional: The number of steps to LoRA has been trained for, used to name the saved file.", optional=True), + ], + outputs=[], + ) + + @classmethod + def execute(cls, lora, prefix, steps=None): + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path( + prefix, folder_paths.get_output_directory() + ) + if steps is None: + output_checkpoint = f"{filename}_{counter:05}_.safetensors" + else: + output_checkpoint = f"{filename}_{steps}_steps_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + safetensors.torch.save_file(lora, output_checkpoint) + return io.NodeOutput() + + +class TrainLoraNode(io.ComfyNode): + @classmethod + def define_schema(cls): + return io.Schema( + node_id="TrainLoraNode_V3", + display_name="Train LoRA _V3", + category="training", + is_experimental=True, + inputs=[ + io.Model.Input("model", tooltip="The model to train the LoRA on."), + io.Latent.Input("latents", tooltip="The Latents to use for training, serve as dataset/input of the model."), + io.Conditioning.Input("positive", tooltip="The positive conditioning to use for training."), + io.Int.Input("batch_size", default=1, min=1, max=10000, step=1, tooltip="The batch size to use for training."), + io.Int.Input("steps", default=16, min=1, max=100000, tooltip="The number of steps to train the LoRA for."), + io.Float.Input("learning_rate", default=0.0005, min=0.0000001, max=1.0, step=0.000001, tooltip="The learning rate to use for training."), + io.Int.Input("rank", default=8, min=1, max=128, tooltip="The rank of the LoRA layers."), + io.Combo.Input("optimizer", options=["AdamW", "Adam", "SGD", "RMSprop"], default="AdamW", tooltip="The optimizer to use for training."), + io.Combo.Input("loss_function", options=["MSE", "L1", "Huber", "SmoothL1"], default="MSE", tooltip="The loss function to use for training."), + io.Int.Input("seed", default=0, min=0, max=0xFFFFFFFFFFFFFFFF, tooltip="The seed to use for training (used in generator for LoRA weight initialization and noise sampling)"), + io.Combo.Input("training_dtype", options=["bf16", "fp32"], default="bf16", tooltip="The dtype to use for training."), + io.Combo.Input("lora_dtype", options=["bf16", "fp32"], default="bf16", tooltip="The dtype to use for lora."), + io.Combo.Input("existing_lora", options=folder_paths.get_filename_list("loras") + ["[None]"], default="[None]", tooltip="The existing LoRA to append to. 
Set to None for new LoRA."), + ], + outputs=[ + io.Model.Output(display_name="model_with_lora"), + io.LoraModel.Output(display_name="lora"), + io.LossMap.Output(display_name="loss"), + io.Int.Output(display_name="steps"), + ], + ) + + @classmethod + def execute( + cls, + model, + latents, + positive, + batch_size, + steps, + learning_rate, + rank, + optimizer, + loss_function, + seed, + training_dtype, + lora_dtype, + existing_lora, + ): + mp = model.clone() + dtype = node_helpers.string_to_torch_dtype(training_dtype) + lora_dtype = node_helpers.string_to_torch_dtype(lora_dtype) + mp.set_model_compute_dtype(dtype) + + latents = latents["samples"].to(dtype) + num_images = latents.shape[0] + logging.info(f"Total Images: {num_images}, Total Captions: {len(positive)}") + if len(positive) == 1 and num_images > 1: + positive = positive * num_images + elif len(positive) != num_images: + raise ValueError( + f"Number of positive conditions ({len(positive)}) does not match number of images ({num_images})." + ) + + with torch.inference_mode(False): + lora_sd = {} + generator = torch.Generator() + generator.manual_seed(seed) + + # Load existing LoRA weights if provided + existing_weights = {} + existing_steps = 0 + if existing_lora != "[None]": + lora_path = folder_paths.get_full_path_or_raise("loras", existing_lora) + # Extract steps from filename like "trained_lora_10_steps_20250225_203716" + existing_steps = int(existing_lora.split("_steps_")[0].split("_")[-1]) + if lora_path: + existing_weights = comfy.utils.load_torch_file(lora_path) + + all_weight_adapters = [] + for n, m in mp.model.named_modules(): + if hasattr(m, "weight_function"): + if m.weight is not None: + key = "{}.weight".format(n) + shape = m.weight.shape + if len(shape) >= 2: + alpha = float(existing_weights.get(f"{key}.alpha", 1.0)) + dora_scale = existing_weights.get( + f"{key}.dora_scale", None + ) + for adapter_cls in adapters: + existing_adapter = adapter_cls.load( + n, existing_weights, alpha, dora_scale + ) + if existing_adapter is not None: + break + else: + # If no existing adapter found, use LoRA + # We will add algo option in the future + existing_adapter = None + adapter_cls = adapters[0] + + if existing_adapter is not None: + train_adapter = existing_adapter.to_train().to(lora_dtype) + else: + # Use LoRA with alpha=1.0 by default + train_adapter = adapter_cls.create_train( + m.weight, rank=rank, alpha=1.0 + ).to(lora_dtype) + for name, parameter in train_adapter.named_parameters(): + lora_sd[f"{n}.{name}"] = parameter + + mp.add_weight_wrapper(key, train_adapter) + all_weight_adapters.append(train_adapter) + else: + diff = torch.nn.Parameter( + torch.zeros( + m.weight.shape, dtype=lora_dtype, requires_grad=True + ) + ) + diff_module = BiasDiff(diff) + mp.add_weight_wrapper(key, BiasDiff(diff)) + all_weight_adapters.append(diff_module) + lora_sd["{}.diff".format(n)] = diff + if hasattr(m, "bias") and m.bias is not None: + key = "{}.bias".format(n) + bias = torch.nn.Parameter( + torch.zeros(m.bias.shape, dtype=lora_dtype, requires_grad=True) + ) + bias_module = BiasDiff(bias) + lora_sd["{}.diff_b".format(n)] = bias + mp.add_weight_wrapper(key, BiasDiff(bias)) + all_weight_adapters.append(bias_module) + + if optimizer == "Adam": + optimizer = torch.optim.Adam(lora_sd.values(), lr=learning_rate) + elif optimizer == "AdamW": + optimizer = torch.optim.AdamW(lora_sd.values(), lr=learning_rate) + elif optimizer == "SGD": + optimizer = torch.optim.SGD(lora_sd.values(), lr=learning_rate) + elif optimizer == "RMSprop": + optimizer 
= torch.optim.RMSprop(lora_sd.values(), lr=learning_rate) + + # Setup loss function based on selection + if loss_function == "MSE": + criterion = torch.nn.MSELoss() + elif loss_function == "L1": + criterion = torch.nn.L1Loss() + elif loss_function == "Huber": + criterion = torch.nn.HuberLoss() + elif loss_function == "SmoothL1": + criterion = torch.nn.SmoothL1Loss() + + # setup models + for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model): + patch(m) + mp.model.requires_grad_(False) + comfy.model_management.load_models_gpu([mp], memory_required=1e20, force_full_load=True) + + # Setup sampler and guider like in test script + loss_map = {"loss": []} + def loss_callback(loss): + loss_map["loss"].append(loss) + train_sampler = TrainSampler( + criterion, + optimizer, + loss_callback=loss_callback, + batch_size=batch_size, + total_steps=steps, + seed=seed, + training_dtype=dtype + ) + guider = comfy_extras.nodes_custom_sampler.Guider_Basic(mp) + guider.set_conds(positive) # Set conditioning from input + + # Training loop + try: + # Generate dummy sigmas and noise + sigmas = torch.tensor(range(num_images)) + noise = comfy_extras.nodes_custom_sampler.Noise_RandomNoise(seed) + guider.sample( + noise.generate_noise({"samples": latents}), + latents, + train_sampler, + sigmas, + seed=noise.seed + ) + finally: + for m in mp.model.modules(): + unpatch(m) + del train_sampler, optimizer + + for adapter in all_weight_adapters: + adapter.requires_grad_(False) + + for param in lora_sd: + lora_sd[param] = lora_sd[param].to(lora_dtype) + + return io.NodeOutput(mp, lora_sd, loss_map, steps + existing_steps) + + +NODES_LIST = [ + LoadImageSetFromFolderNode, + LoadImageTextSetFromFolderNode, + LoraModelLoader, + LossGraphNode, + SaveLoRA, + TrainLoraNode, +] diff --git a/nodes.py b/nodes.py index 296aa0027..e2805463c 100644 --- a/nodes.py +++ b/nodes.py @@ -2350,6 +2350,10 @@ def init_builtin_extra_nodes(): "v3/nodes_sdupscale.py", "v3/nodes_slg.py", "v3/nodes_stable_cascade.py", + "v3/nodes_tcfg.py", + "v3/nodes_tomesd.py", + "v3/nodes_torch_compile.py", + "v3/nodes_train.py", "v3/nodes_upscale_model.py", "v3/nodes_video.py", "v3/nodes_video_model.py", From 66cd5152fd613b4a14580c02f02e881edd0259a2 Mon Sep 17 00:00:00 2001 From: bigcat88 Date: Thu, 24 Jul 2025 15:40:39 +0300 Subject: [PATCH 5/7] apply changes from https://github.com/comfyanonymous/ComfyUI/pull/9015 --- comfy_extras/v3/nodes_train.py | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/comfy_extras/v3/nodes_train.py b/comfy_extras/v3/nodes_train.py index 1c9290bbf..46888f5be 100644 --- a/comfy_extras/v3/nodes_train.py +++ b/comfy_extras/v3/nodes_train.py @@ -17,7 +17,7 @@ import comfy.utils import comfy_extras.nodes_custom_sampler import folder_paths import node_helpers -from comfy.weight_adapter import adapters +from comfy.weight_adapter import adapter_maps, adapters from comfy_api.v3 import io, ui @@ -38,12 +38,13 @@ def make_batch_extra_option_dict(d, indicies, full_size=None): class TrainSampler(comfy.samplers.Sampler): - def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, total_steps=1, seed=0, training_dtype=torch.bfloat16): + def __init__(self, loss_fn, optimizer, loss_callback=None, batch_size=1, grad_acc=1, total_steps=1, seed=0, training_dtype=torch.bfloat16): self.loss_fn = loss_fn self.optimizer = optimizer self.loss_callback = loss_callback self.batch_size = batch_size self.total_steps = total_steps + self.grad_acc = grad_acc self.seed = seed 
self.training_dtype = training_dtype @@ -90,8 +91,9 @@ class TrainSampler(comfy.samplers.Sampler): self.loss_callback(loss.item()) pbar.set_postfix({"loss": f"{loss.item():.4f}"}) - self.optimizer.step() - self.optimizer.zero_grad() + if (i + 1) % self.grad_acc == 0: + self.optimizer.step() + self.optimizer.zero_grad() torch.cuda.empty_cache() return torch.zeros_like(latent_image) @@ -461,6 +463,7 @@ class TrainLoraNode(io.ComfyNode): io.Latent.Input("latents", tooltip="The Latents to use for training, serve as dataset/input of the model."), io.Conditioning.Input("positive", tooltip="The positive conditioning to use for training."), io.Int.Input("batch_size", default=1, min=1, max=10000, step=1, tooltip="The batch size to use for training."), + io.Int.Input("grad_accumulation_steps", default=1, min=1, max=1024, step=1, tooltip="The number of gradient accumulation steps to use for training."), io.Int.Input("steps", default=16, min=1, max=100000, tooltip="The number of steps to train the LoRA for."), io.Float.Input("learning_rate", default=0.0005, min=0.0000001, max=1.0, step=0.000001, tooltip="The learning rate to use for training."), io.Int.Input("rank", default=8, min=1, max=128, tooltip="The rank of the LoRA layers."), @@ -469,6 +472,8 @@ class TrainLoraNode(io.ComfyNode): io.Int.Input("seed", default=0, min=0, max=0xFFFFFFFFFFFFFFFF, tooltip="The seed to use for training (used in generator for LoRA weight initialization and noise sampling)"), io.Combo.Input("training_dtype", options=["bf16", "fp32"], default="bf16", tooltip="The dtype to use for training."), io.Combo.Input("lora_dtype", options=["bf16", "fp32"], default="bf16", tooltip="The dtype to use for lora."), + io.Combo.Input("algorithm", options=list(adapter_maps.keys()), default=list(adapter_maps.keys())[0], tooltip="The algorithm to use for training."), + io.Boolean.Input("gradient_checkpointing", default=True, tooltip="Use gradient checkpointing for training."), io.Combo.Input("existing_lora", options=folder_paths.get_filename_list("loras") + ["[None]"], default="[None]", tooltip="The existing LoRA to append to. 
Set to None for new LoRA."), ], outputs=[ @@ -487,6 +492,7 @@ class TrainLoraNode(io.ComfyNode): positive, batch_size, steps, + grad_accumulation_steps, learning_rate, rank, optimizer, @@ -494,6 +500,8 @@ class TrainLoraNode(io.ComfyNode): seed, training_dtype, lora_dtype, + algorithm, + gradient_checkpointing, existing_lora, ): mp = model.clone() @@ -544,10 +552,8 @@ class TrainLoraNode(io.ComfyNode): if existing_adapter is not None: break else: - # If no existing adapter found, use LoRA - # We will add algo option in the future existing_adapter = None - adapter_cls = adapters[0] + adapter_cls = adapter_maps[algorithm] if existing_adapter is not None: train_adapter = existing_adapter.to_train().to(lora_dtype) @@ -601,8 +607,9 @@ class TrainLoraNode(io.ComfyNode): criterion = torch.nn.SmoothL1Loss() # setup models - for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model): - patch(m) + if gradient_checkpointing: + for m in find_all_highest_child_module_with_forward(mp.model.diffusion_model): + patch(m) mp.model.requires_grad_(False) comfy.model_management.load_models_gpu([mp], memory_required=1e20, force_full_load=True) @@ -615,7 +622,8 @@ class TrainLoraNode(io.ComfyNode): optimizer, loss_callback=loss_callback, batch_size=batch_size, - total_steps=steps, + grad_acc=grad_accumulation_steps, + total_steps=steps * grad_accumulation_steps, seed=seed, training_dtype=dtype ) From f5698237381b7d7bcaffb35fa4c3d44532597dc3 Mon Sep 17 00:00:00 2001 From: bigcat88 Date: Thu, 24 Jul 2025 22:03:50 +0300 Subject: [PATCH 6/7] pass "id" in Schema inputs as an arg instead of kwarg --- comfy_extras/v3/nodes_audio.py | 22 ++--- .../v3/nodes_differential_diffusion.py | 2 +- comfy_extras/v3/nodes_flux.py | 16 ++-- comfy_extras/v3/nodes_freelunch.py | 20 ++--- comfy_extras/v3/nodes_fresca.py | 8 +- comfy_extras/v3/nodes_gits.py | 6 +- comfy_extras/v3/nodes_hypertile.py | 10 +-- comfy_extras/v3/nodes_ip2p.py | 8 +- comfy_extras/v3/nodes_latent.py | 42 ++++----- comfy_extras/v3/nodes_load_3d.py | 24 ++--- comfy_extras/v3/nodes_lora_extract.py | 8 +- comfy_extras/v3/nodes_lt.py | 88 +++++++++---------- comfy_extras/v3/nodes_lumina2.py | 12 +-- comfy_extras/v3/nodes_morphology.py | 20 ++--- comfy_extras/v3/nodes_optimalsteps.py | 6 +- comfy_extras/v3/nodes_pag.py | 4 +- comfy_extras/v3/nodes_perpneg.py | 12 +-- 17 files changed, 154 insertions(+), 154 deletions(-) diff --git a/comfy_extras/v3/nodes_audio.py b/comfy_extras/v3/nodes_audio.py index dc7659807..68b841d3a 100644 --- a/comfy_extras/v3/nodes_audio.py +++ b/comfy_extras/v3/nodes_audio.py @@ -19,14 +19,14 @@ class ConditioningStableAudio(io.ComfyNode): node_id="ConditioningStableAudio_V3", category="conditioning", inputs=[ - io.Conditioning.Input(id="positive"), - io.Conditioning.Input(id="negative"), - io.Float.Input(id="seconds_start", default=0.0, min=0.0, max=1000.0, step=0.1), - io.Float.Input(id="seconds_total", default=47.0, min=0.0, max=1000.0, step=0.1), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Float.Input("seconds_start", default=0.0, min=0.0, max=1000.0, step=0.1), + io.Float.Input("seconds_total", default=47.0, min=0.0, max=1000.0, step=0.1), ], outputs=[ - io.Conditioning.Output(id="positive_out", display_name="positive"), - io.Conditioning.Output(id="negative_out", display_name="negative"), + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), ], ) @@ -49,7 +49,7 @@ class EmptyLatentAudio(io.ComfyNode): node_id="EmptyLatentAudio_V3", 
category="latent/audio", inputs=[ - io.Float.Input(id="seconds", default=47.6, min=1.0, max=1000.0, step=0.1), + io.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1), io.Int.Input( id="batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch." ), @@ -200,8 +200,8 @@ class VAEDecodeAudio(io.ComfyNode): node_id="VAEDecodeAudio_V3", category="latent/audio", inputs=[ - io.Latent.Input(id="samples"), - io.Vae.Input(id="vae"), + io.Latent.Input("samples"), + io.Vae.Input("vae"), ], outputs=[io.Audio.Output()], ) @@ -222,8 +222,8 @@ class VAEEncodeAudio(io.ComfyNode): node_id="VAEEncodeAudio_V3", category="latent/audio", inputs=[ - io.Audio.Input(id="audio"), - io.Vae.Input(id="vae"), + io.Audio.Input("audio"), + io.Vae.Input("vae"), ], outputs=[io.Latent.Output()], ) diff --git a/comfy_extras/v3/nodes_differential_diffusion.py b/comfy_extras/v3/nodes_differential_diffusion.py index 3af5381b1..58906b2fa 100644 --- a/comfy_extras/v3/nodes_differential_diffusion.py +++ b/comfy_extras/v3/nodes_differential_diffusion.py @@ -13,7 +13,7 @@ class DifferentialDiffusion(io.ComfyNode): display_name="Differential Diffusion _V3", category="_for_testing", inputs=[ - io.Model.Input(id="model"), + io.Model.Input("model"), ], outputs=[ io.Model.Output(), diff --git a/comfy_extras/v3/nodes_flux.py b/comfy_extras/v3/nodes_flux.py index f2f4a118e..266fdfdd4 100644 --- a/comfy_extras/v3/nodes_flux.py +++ b/comfy_extras/v3/nodes_flux.py @@ -32,10 +32,10 @@ class CLIPTextEncodeFlux(io.ComfyNode): node_id="CLIPTextEncodeFlux_V3", category="advanced/conditioning/flux", inputs=[ - io.Clip.Input(id="clip"), - io.String.Input(id="clip_l", multiline=True, dynamic_prompts=True), - io.String.Input(id="t5xxl", multiline=True, dynamic_prompts=True), - io.Float.Input(id="guidance", default=3.5, min=0.0, max=100.0, step=0.1), + io.Clip.Input("clip"), + io.String.Input("clip_l", multiline=True, dynamic_prompts=True), + io.String.Input("t5xxl", multiline=True, dynamic_prompts=True), + io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1), ], outputs=[ io.Conditioning.Output(), @@ -58,7 +58,7 @@ class FluxDisableGuidance(io.ComfyNode): category="advanced/conditioning/flux", description="This node completely disables the guidance embed on Flux and Flux like models", inputs=[ - io.Conditioning.Input(id="conditioning"), + io.Conditioning.Input("conditioning"), ], outputs=[ io.Conditioning.Output(), @@ -78,8 +78,8 @@ class FluxGuidance(io.ComfyNode): node_id="FluxGuidance_V3", category="advanced/conditioning/flux", inputs=[ - io.Conditioning.Input(id="conditioning"), - io.Float.Input(id="guidance", default=3.5, min=0.0, max=100.0, step=0.1), + io.Conditioning.Input("conditioning"), + io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1), ], outputs=[ io.Conditioning.Output(), @@ -100,7 +100,7 @@ class FluxKontextImageScale(io.ComfyNode): category="advanced/conditioning/flux", description="This node resizes the image to one that is more optimal for flux kontext.", inputs=[ - io.Image.Input(id="image"), + io.Image.Input("image"), ], outputs=[ io.Image.Output(), diff --git a/comfy_extras/v3/nodes_freelunch.py b/comfy_extras/v3/nodes_freelunch.py index e829a8fdd..c7d71c0a0 100644 --- a/comfy_extras/v3/nodes_freelunch.py +++ b/comfy_extras/v3/nodes_freelunch.py @@ -35,11 +35,11 @@ class FreeU(io.ComfyNode): node_id="FreeU_V3", category="model_patches/unet", inputs=[ - io.Model.Input(id="model"), - io.Float.Input(id="b1", default=1.1, min=0.0, max=10.0, 
step=0.01), - io.Float.Input(id="b2", default=1.2, min=0.0, max=10.0, step=0.01), - io.Float.Input(id="s1", default=0.9, min=0.0, max=10.0, step=0.01), - io.Float.Input(id="s2", default=0.2, min=0.0, max=10.0, step=0.01), + io.Model.Input("model"), + io.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01), + io.Float.Input("b2", default=1.2, min=0.0, max=10.0, step=0.01), + io.Float.Input("s1", default=0.9, min=0.0, max=10.0, step=0.01), + io.Float.Input("s2", default=0.2, min=0.0, max=10.0, step=0.01), ], outputs=[ io.Model.Output(), @@ -80,11 +80,11 @@ class FreeU_V2(io.ComfyNode): node_id="FreeU_V2_V3", category="model_patches/unet", inputs=[ - io.Model.Input(id="model"), - io.Float.Input(id="b1", default=1.3, min=0.0, max=10.0, step=0.01), - io.Float.Input(id="b2", default=1.4, min=0.0, max=10.0, step=0.01), - io.Float.Input(id="s1", default=0.9, min=0.0, max=10.0, step=0.01), - io.Float.Input(id="s2", default=0.2, min=0.0, max=10.0, step=0.01), + io.Model.Input("model"), + io.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01), + io.Float.Input("b2", default=1.4, min=0.0, max=10.0, step=0.01), + io.Float.Input("s1", default=0.9, min=0.0, max=10.0, step=0.01), + io.Float.Input("s2", default=0.2, min=0.0, max=10.0, step=0.01), ], outputs=[ io.Model.Output(), diff --git a/comfy_extras/v3/nodes_fresca.py b/comfy_extras/v3/nodes_fresca.py index eeae79ec2..4d890eef4 100644 --- a/comfy_extras/v3/nodes_fresca.py +++ b/comfy_extras/v3/nodes_fresca.py @@ -65,12 +65,12 @@ class FreSca(io.ComfyNode): category="_for_testing", description="Applies frequency-dependent scaling to the guidance", inputs=[ - io.Model.Input(id="model"), - io.Float.Input(id="scale_low", default=1.0, min=0, max=10, step=0.01, + io.Model.Input("model"), + io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01, tooltip="Scaling factor for low-frequency components"), - io.Float.Input(id="scale_high", default=1.25, min=0, max=10, step=0.01, + io.Float.Input("scale_high", default=1.25, min=0, max=10, step=0.01, tooltip="Scaling factor for high-frequency components"), - io.Int.Input(id="freq_cutoff", default=20, min=1, max=10000, step=1, + io.Int.Input("freq_cutoff", default=20, min=1, max=10000, step=1, tooltip="Number of frequency indices around center to consider as low-frequency"), ], outputs=[ diff --git a/comfy_extras/v3/nodes_gits.py b/comfy_extras/v3/nodes_gits.py index 211241628..0d774a96e 100644 --- a/comfy_extras/v3/nodes_gits.py +++ b/comfy_extras/v3/nodes_gits.py @@ -343,9 +343,9 @@ class GITSScheduler(io.ComfyNode): node_id="GITSScheduler_V3", category="sampling/custom_sampling/schedulers", inputs=[ - io.Float.Input(id="coeff", default=1.20, min=0.80, max=1.50, step=0.05), - io.Int.Input(id="steps", default=10, min=2, max=1000), - io.Float.Input(id="denoise", default=1.0, min=0.0, max=1.0, step=0.01), + io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05), + io.Int.Input("steps", default=10, min=2, max=1000), + io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), ], outputs=[ io.Sigmas.Output(), diff --git a/comfy_extras/v3/nodes_hypertile.py b/comfy_extras/v3/nodes_hypertile.py index bf6ea11ce..e1c9bb503 100644 --- a/comfy_extras/v3/nodes_hypertile.py +++ b/comfy_extras/v3/nodes_hypertile.py @@ -33,11 +33,11 @@ class HyperTile(io.ComfyNode): node_id="HyperTile_V3", category="model_patches/unet", inputs=[ - io.Model.Input(id="model"), - io.Int.Input(id="tile_size", default=256, min=1, max=2048), - io.Int.Input(id="swap_size", default=2, min=1, max=128), - 
io.Int.Input(id="max_depth", default=0, min=0, max=10), - io.Boolean.Input(id="scale_depth", default=False), + io.Model.Input("model"), + io.Int.Input("tile_size", default=256, min=1, max=2048), + io.Int.Input("swap_size", default=2, min=1, max=128), + io.Int.Input("max_depth", default=0, min=0, max=10), + io.Boolean.Input("scale_depth", default=False), ], outputs=[ io.Model.Output(), diff --git a/comfy_extras/v3/nodes_ip2p.py b/comfy_extras/v3/nodes_ip2p.py index 79d9c2697..7789c496a 100644 --- a/comfy_extras/v3/nodes_ip2p.py +++ b/comfy_extras/v3/nodes_ip2p.py @@ -12,10 +12,10 @@ class InstructPixToPixConditioning(io.ComfyNode): node_id="InstructPixToPixConditioning_V3", category="conditioning/instructpix2pix", inputs=[ - io.Conditioning.Input(id="positive"), - io.Conditioning.Input(id="negative"), - io.Vae.Input(id="vae"), - io.Image.Input(id="pixels"), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Image.Input("pixels"), ], outputs=[ io.Conditioning.Output(display_name="positive"), diff --git a/comfy_extras/v3/nodes_latent.py b/comfy_extras/v3/nodes_latent.py index 13d551eb7..ab4d690eb 100644 --- a/comfy_extras/v3/nodes_latent.py +++ b/comfy_extras/v3/nodes_latent.py @@ -24,8 +24,8 @@ class LatentAdd(io.ComfyNode): node_id="LatentAdd_V3", category="latent/advanced", inputs=[ - io.Latent.Input(id="samples1"), - io.Latent.Input(id="samples2"), + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), ], outputs=[ io.Latent.Output(), @@ -52,8 +52,8 @@ class LatentApplyOperation(io.ComfyNode): category="latent/advanced/operations", is_experimental=True, inputs=[ - io.Latent.Input(id="samples"), - io.LatentOperation.Input(id="operation"), + io.Latent.Input("samples"), + io.LatentOperation.Input("operation"), ], outputs=[ io.Latent.Output(), @@ -77,8 +77,8 @@ class LatentApplyOperationCFG(io.ComfyNode): category="latent/advanced/operations", is_experimental=True, inputs=[ - io.Model.Input(id="model"), - io.LatentOperation.Input(id="operation"), + io.Model.Input("model"), + io.LatentOperation.Input("operation"), ], outputs=[ io.Model.Output(), @@ -108,8 +108,8 @@ class LatentBatch(io.ComfyNode): node_id="LatentBatch_V3", category="latent/batch", inputs=[ - io.Latent.Input(id="samples1"), - io.Latent.Input(id="samples2"), + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), ], outputs=[ io.Latent.Output(), @@ -137,8 +137,8 @@ class LatentBatchSeedBehavior(io.ComfyNode): node_id="LatentBatchSeedBehavior_V3", category="latent/advanced", inputs=[ - io.Latent.Input(id="samples"), - io.Combo.Input(id="seed_behavior", options=["random", "fixed"], default="fixed"), + io.Latent.Input("samples"), + io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"), ], outputs=[ io.Latent.Output(), @@ -166,9 +166,9 @@ class LatentInterpolate(io.ComfyNode): node_id="LatentInterpolate_V3", category="latent/advanced", inputs=[ - io.Latent.Input(id="samples1"), - io.Latent.Input(id="samples2"), - io.Float.Input(id="ratio", default=1.0, min=0.0, max=1.0, step=0.01), + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), + io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01), ], outputs=[ io.Latent.Output(), @@ -205,8 +205,8 @@ class LatentMultiply(io.ComfyNode): node_id="LatentMultiply_V3", category="latent/advanced", inputs=[ - io.Latent.Input(id="samples"), - io.Float.Input(id="multiplier", default=1.0, min=-10.0, max=10.0, step=0.01), + io.Latent.Input("samples"), + io.Float.Input("multiplier", default=1.0, 
min=-10.0, max=10.0, step=0.01), ], outputs=[ io.Latent.Output(), @@ -230,9 +230,9 @@ class LatentOperationSharpen(io.ComfyNode): category="latent/advanced/operations", is_experimental=True, inputs=[ - io.Int.Input(id="sharpen_radius", default=9, min=1, max=31, step=1), - io.Float.Input(id="sigma", default=1.0, min=0.1, max=10.0, step=0.1), - io.Float.Input(id="alpha", default=0.1, min=0.0, max=5.0, step=0.01), + io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1), + io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1), + io.Float.Input("alpha", default=0.1, min=0.0, max=5.0, step=0.01), ], outputs=[ io.LatentOperation.Output(), @@ -272,7 +272,7 @@ class LatentOperationTonemapReinhard(io.ComfyNode): category="latent/advanced/operations", is_experimental=True, inputs=[ - io.Float.Input(id="multiplier", default=1.0, min=0.0, max=100.0, step=0.01), + io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01), ], outputs=[ io.LatentOperation.Output(), @@ -306,8 +306,8 @@ class LatentSubtract(io.ComfyNode): node_id="LatentSubtract_V3", category="latent/advanced", inputs=[ - io.Latent.Input(id="samples1"), - io.Latent.Input(id="samples2"), + io.Latent.Input("samples1"), + io.Latent.Input("samples2"), ], outputs=[ io.Latent.Output(), diff --git a/comfy_extras/v3/nodes_load_3d.py b/comfy_extras/v3/nodes_load_3d.py index e7a82ccf3..af12b497a 100644 --- a/comfy_extras/v3/nodes_load_3d.py +++ b/comfy_extras/v3/nodes_load_3d.py @@ -35,10 +35,10 @@ class Load3D(io.ComfyNode): category="3d", is_experimental=True, inputs=[ - io.Combo.Input(id="model_file", options=sorted(files), upload=io.UploadType.model), - io.Load3D.Input(id="image"), - io.Int.Input(id="width", default=1024, min=1, max=4096, step=1), - io.Int.Input(id="height", default=1024, min=1, max=4096, step=1), + io.Combo.Input("model_file", options=sorted(files), upload=io.UploadType.model), + io.Load3D.Input("image"), + io.Int.Input("width", default=1024, min=1, max=4096, step=1), + io.Int.Input("height", default=1024, min=1, max=4096, step=1), ], outputs=[ io.Image.Output(display_name="image"), @@ -96,10 +96,10 @@ class Load3DAnimation(io.ComfyNode): category="3d", is_experimental=True, inputs=[ - io.Combo.Input(id="model_file", options=sorted(files), upload=io.UploadType.model), - io.Load3DAnimation.Input(id="image"), - io.Int.Input(id="width", default=1024, min=1, max=4096, step=1), - io.Int.Input(id="height", default=1024, min=1, max=4096, step=1), + io.Combo.Input("model_file", options=sorted(files), upload=io.UploadType.model), + io.Load3DAnimation.Input("image"), + io.Int.Input("width", default=1024, min=1, max=4096, step=1), + io.Int.Input("height", default=1024, min=1, max=4096, step=1), ], outputs=[ io.Image.Output(display_name="image"), @@ -140,8 +140,8 @@ class Preview3D(io.ComfyNode): is_experimental=True, is_output_node=True, inputs=[ - io.String.Input(id="model_file", default="", multiline=False), - io.Load3DCamera.Input(id="camera_info", optional=True), + io.String.Input("model_file", default="", multiline=False), + io.Load3DCamera.Input("camera_info", optional=True), ], outputs=[], ) @@ -161,8 +161,8 @@ class Preview3DAnimation(io.ComfyNode): is_experimental=True, is_output_node=True, inputs=[ - io.String.Input(id="model_file", default="", multiline=False), - io.Load3DCamera.Input(id="camera_info", optional=True), + io.String.Input("model_file", default="", multiline=False), + io.Load3DCamera.Input("camera_info", optional=True), ], outputs=[], ) diff --git 
a/comfy_extras/v3/nodes_lora_extract.py b/comfy_extras/v3/nodes_lora_extract.py index 180b62007..197740515 100644 --- a/comfy_extras/v3/nodes_lora_extract.py +++ b/comfy_extras/v3/nodes_lora_extract.py @@ -91,10 +91,10 @@ class LoraSave(io.ComfyNode): category="_for_testing", is_output_node=True, inputs=[ - io.String.Input(id="filename_prefix", default="loras/ComfyUI_extracted_lora"), - io.Int.Input(id="rank", default=8, min=1, max=4096, step=1), - io.Combo.Input(id="lora_type", options=list(LORA_TYPES.keys())), - io.Boolean.Input(id="bias_diff", default=True), + io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"), + io.Int.Input("rank", default=8, min=1, max=4096, step=1), + io.Combo.Input("lora_type", options=list(LORA_TYPES.keys())), + io.Boolean.Input("bias_diff", default=True), io.Model.Input( id="model_diff", optional=True, tooltip="The ModelSubtract output to be converted to a lora." ), diff --git a/comfy_extras/v3/nodes_lt.py b/comfy_extras/v3/nodes_lt.py index b1eefcb84..abb3a932b 100644 --- a/comfy_extras/v3/nodes_lt.py +++ b/comfy_extras/v3/nodes_lt.py @@ -93,10 +93,10 @@ class EmptyLTXVLatentVideo(io.ComfyNode): node_id="EmptyLTXVLatentVideo_V3", category="latent/video/ltxv", inputs=[ - io.Int.Input(id="width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), - io.Int.Input(id="height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), - io.Int.Input(id="length", default=97, min=1, max=nodes.MAX_RESOLUTION, step=8), - io.Int.Input(id="batch_size", default=1, min=1, max=4096), + io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("length", default=97, min=1, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("batch_size", default=1, min=1, max=4096), ], outputs=[ io.Latent.Output(), @@ -122,10 +122,10 @@ class LTXVAddGuide(io.ComfyNode): node_id="LTXVAddGuide_V3", category="conditioning/video_models", inputs=[ - io.Conditioning.Input(id="positive"), - io.Conditioning.Input(id="negative"), - io.Vae.Input(id="vae"), - io.Latent.Input(id="latent"), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Latent.Input("latent"), io.Image.Input( id="image", tooltip="Image or video to condition the latent video on. Must be 8*n + 1 frames. " @@ -141,12 +141,12 @@ class LTXVAddGuide(io.ComfyNode): "For videos with 9+ frames, frame_idx must be divisible by 8, otherwise it will be rounded " "down to the nearest multiple of 8. 
Negative values are counted from the end of the video.", ), - io.Float.Input(id="strength", default=1.0, min=0.0, max=1.0, step=0.01), + io.Float.Input("strength", default=1.0, min=0.0, max=1.0, step=0.01), ], outputs=[ - io.Conditioning.Output(id="positive_out", display_name="positive"), - io.Conditioning.Output(id="negative_out", display_name="negative"), - io.Latent.Output(id="latent_out", display_name="latent"), + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), ], ) @@ -282,13 +282,13 @@ class LTXVConditioning(io.ComfyNode): node_id="LTXVConditioning_V3", category="conditioning/video_models", inputs=[ - io.Conditioning.Input(id="positive"), - io.Conditioning.Input(id="negative"), - io.Float.Input(id="frame_rate", default=25.0, min=0.0, max=1000.0, step=0.01), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Float.Input("frame_rate", default=25.0, min=0.0, max=1000.0, step=0.01), ], outputs=[ - io.Conditioning.Output(id="positive_out", display_name="positive"), - io.Conditioning.Output(id="negative_out", display_name="negative"), + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), ], ) @@ -306,14 +306,14 @@ class LTXVCropGuides(io.ComfyNode): node_id="LTXVCropGuides_V3", category="conditioning/video_models", inputs=[ - io.Conditioning.Input(id="positive"), - io.Conditioning.Input(id="negative"), - io.Latent.Input(id="latent"), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Latent.Input("latent"), ], outputs=[ - io.Conditioning.Output(id="positive_out", display_name="positive"), - io.Conditioning.Output(id="negative_out", display_name="negative"), - io.Latent.Output(id="latent_out", display_name="latent"), + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), ], ) @@ -342,19 +342,19 @@ class LTXVImgToVideo(io.ComfyNode): node_id="LTXVImgToVideo_V3", category="conditioning/video_models", inputs=[ - io.Conditioning.Input(id="positive"), - io.Conditioning.Input(id="negative"), - io.Vae.Input(id="vae"), - io.Image.Input(id="image"), - io.Int.Input(id="width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), - io.Int.Input(id="height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), - io.Int.Input(id="length", default=97, min=9, max=nodes.MAX_RESOLUTION, step=8), - io.Int.Input(id="batch_size", default=1, min=1, max=4096), - io.Float.Input(id="strength", default=1.0, min=0.0, max=1.0), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Image.Input("image"), + io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), + io.Int.Input("length", default=97, min=9, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("batch_size", default=1, min=1, max=4096), + io.Float.Input("strength", default=1.0, min=0.0, max=1.0), ], outputs=[ - io.Conditioning.Output(id="positive_out", display_name="positive"), - io.Conditioning.Output(id="negative_out", display_name="negative"), + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), io.Latent.Output(display_name="latent"), ], ) @@ -390,13 +390,13 @@ class LTXVPreprocess(io.ComfyNode): node_id="LTXVPreprocess_V3", category="image", inputs=[ - 
io.Image.Input(id="image"), + io.Image.Input("image"), io.Int.Input( id="img_compression", default=35, min=0, max=100, tooltip="Amount of compression to apply on image." ), ], outputs=[ - io.Image.Output(id="output_image", display_name="output_image"), + io.Image.Output(display_name="output_image"), ], ) @@ -415,9 +415,9 @@ class LTXVScheduler(io.ComfyNode): node_id="LTXVScheduler_V3", category="sampling/custom_sampling/schedulers", inputs=[ - io.Int.Input(id="steps", default=20, min=1, max=10000), - io.Float.Input(id="max_shift", default=2.05, min=0.0, max=100.0, step=0.01), - io.Float.Input(id="base_shift", default=0.95, min=0.0, max=100.0, step=0.01), + io.Int.Input("steps", default=20, min=1, max=10000), + io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01), + io.Float.Input("base_shift", default=0.95, min=0.0, max=100.0, step=0.01), io.Boolean.Input( id="stretch", default=True, @@ -431,7 +431,7 @@ class LTXVScheduler(io.ComfyNode): step=0.01, tooltip="The terminal value of the sigmas after stretching.", ), - io.Latent.Input(id="latent", optional=True), + io.Latent.Input("latent", optional=True), ], outputs=[ io.Sigmas.Output(), @@ -478,10 +478,10 @@ class ModelSamplingLTXV(io.ComfyNode): node_id="ModelSamplingLTXV_V3", category="advanced/model", inputs=[ - io.Model.Input(id="model"), - io.Float.Input(id="max_shift", default=2.05, min=0.0, max=100.0, step=0.01), - io.Float.Input(id="base_shift", default=0.95, min=0.0, max=100.0, step=0.01), - io.Latent.Input(id="latent", optional=True), + io.Model.Input("model"), + io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01), + io.Float.Input("base_shift", default=0.95, min=0.0, max=100.0, step=0.01), + io.Latent.Input("latent", optional=True), ], outputs=[ io.Model.Output(), diff --git a/comfy_extras/v3/nodes_lumina2.py b/comfy_extras/v3/nodes_lumina2.py index 66ea981fc..4dadce477 100644 --- a/comfy_extras/v3/nodes_lumina2.py +++ b/comfy_extras/v3/nodes_lumina2.py @@ -27,9 +27,9 @@ class CLIPTextEncodeLumina2(io.ComfyNode): description="Encodes a system prompt and a user prompt using a CLIP model into an embedding " "that can be used to guide the diffusion model towards generating specific images.", inputs=[ - io.Combo.Input(id="system_prompt", options=list(cls.SYSTEM_PROMPT.keys()), tooltip=cls.SYSTEM_PROMPT_TIP), - io.String.Input(id="user_prompt", multiline=True, dynamic_prompts=True, tooltip="The text to be encoded."), - io.Clip.Input(id="clip", tooltip="The CLIP model used for encoding the text."), + io.Combo.Input("system_prompt", options=list(cls.SYSTEM_PROMPT.keys()), tooltip=cls.SYSTEM_PROMPT_TIP), + io.String.Input("user_prompt", multiline=True, dynamic_prompts=True, tooltip="The text to be encoded."), + io.Clip.Input("clip", tooltip="The CLIP model used for encoding the text."), ], outputs=[ io.Conditioning.Output(tooltip="A conditioning containing the embedded text used to guide the diffusion model."), @@ -56,9 +56,9 @@ class RenormCFG(io.ComfyNode): node_id="RenormCFG_V3", category="advanced/model", inputs=[ - io.Model.Input(id="model"), - io.Float.Input(id="cfg_trunc", default=100, min=0.0, max=100.0, step=0.01), - io.Float.Input(id="renorm_cfg", default=1.0, min=0.0, max=100.0, step=0.01), + io.Model.Input("model"), + io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01), + io.Float.Input("renorm_cfg", default=1.0, min=0.0, max=100.0, step=0.01), ], outputs=[ io.Model.Output(), diff --git a/comfy_extras/v3/nodes_morphology.py b/comfy_extras/v3/nodes_morphology.py index 
5b3577973..7c44cc086 100644 --- a/comfy_extras/v3/nodes_morphology.py +++ b/comfy_extras/v3/nodes_morphology.py @@ -23,12 +23,12 @@ class ImageRGBToYUV(io.ComfyNode): node_id="ImageRGBToYUV_V3", category="image/batch", inputs=[ - io.Image.Input(id="image"), + io.Image.Input("image"), ], outputs=[ - io.Image.Output(id="Y", display_name="Y"), - io.Image.Output(id="U", display_name="U"), - io.Image.Output(id="V", display_name="V"), + io.Image.Output(display_name="Y"), + io.Image.Output(display_name="U"), + io.Image.Output(display_name="V"), ], ) @@ -45,9 +45,9 @@ class ImageYUVToRGB(io.ComfyNode): node_id="ImageYUVToRGB_V3", category="image/batch", inputs=[ - io.Image.Input(id="Y"), - io.Image.Input(id="U"), - io.Image.Input(id="V"), + io.Image.Input("Y"), + io.Image.Input("U"), + io.Image.Input("V"), ], outputs=[ io.Image.Output(), @@ -68,9 +68,9 @@ class Morphology(io.ComfyNode): display_name="ImageMorphology _V3", category="image/postprocessing", inputs=[ - io.Image.Input(id="image"), - io.Combo.Input(id="operation", options=["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"]), - io.Int.Input(id="kernel_size", default=3, min=3, max=999, step=1), + io.Image.Input("image"), + io.Combo.Input("operation", options=["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"]), + io.Int.Input("kernel_size", default=3, min=3, max=999, step=1), ], outputs=[ io.Image.Output(), diff --git a/comfy_extras/v3/nodes_optimalsteps.py b/comfy_extras/v3/nodes_optimalsteps.py index 5dd21b0ff..4dcb79b4d 100644 --- a/comfy_extras/v3/nodes_optimalsteps.py +++ b/comfy_extras/v3/nodes_optimalsteps.py @@ -33,9 +33,9 @@ class OptimalStepsScheduler(io.ComfyNode): node_id="OptimalStepsScheduler_V3", category="sampling/custom_sampling/schedulers", inputs=[ - io.Combo.Input(id="model_type", options=["FLUX", "Wan", "Chroma"]), - io.Int.Input(id="steps", default=20, min=3, max=1000), - io.Float.Input(id="denoise", default=1.0, min=0.0, max=1.0, step=0.01), + io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]), + io.Int.Input("steps", default=20, min=3, max=1000), + io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01), ], outputs=[ io.Sigmas.Output(), diff --git a/comfy_extras/v3/nodes_pag.py b/comfy_extras/v3/nodes_pag.py index 1268343c4..7aebf614a 100644 --- a/comfy_extras/v3/nodes_pag.py +++ b/comfy_extras/v3/nodes_pag.py @@ -17,8 +17,8 @@ class PerturbedAttentionGuidance(io.ComfyNode): node_id="PerturbedAttentionGuidance_V3", category="model_patches/unet", inputs=[ - io.Model.Input(id="model"), - io.Float.Input(id="scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01), + io.Model.Input("model"), + io.Float.Input("scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01), ], outputs=[ io.Model.Output(), diff --git a/comfy_extras/v3/nodes_perpneg.py b/comfy_extras/v3/nodes_perpneg.py index 5ba27698e..d7ee7cfa4 100644 --- a/comfy_extras/v3/nodes_perpneg.py +++ b/comfy_extras/v3/nodes_perpneg.py @@ -88,12 +88,12 @@ class PerpNegGuider(io.ComfyNode): node_id="PerpNegGuider_V3", category="_for_testing", inputs=[ - io.Model.Input(id="model"), - io.Conditioning.Input(id="positive"), - io.Conditioning.Input(id="negative"), - io.Conditioning.Input(id="empty_conditioning"), - io.Float.Input(id="cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01), - io.Float.Input(id="neg_scale", default=1.0, min=0.0, max=100.0, step=0.01), + io.Model.Input("model"), + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + 
io.Conditioning.Input("empty_conditioning"), + io.Float.Input("cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01), + io.Float.Input("neg_scale", default=1.0, min=0.0, max=100.0, step=0.01), ], outputs=[ io.Guider.Output(), From c3d9243915f0a34fc88bf3890914a93603a93d82 Mon Sep 17 00:00:00 2001 From: bigcat88 Date: Thu, 24 Jul 2025 22:10:35 +0300 Subject: [PATCH 7/7] adjusted input parameters of ui.PreviewUI3D --- comfy_api/v3/ui.py | 7 ++++--- comfy_extras/v3/nodes_load_3d.py | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/comfy_api/v3/ui.py b/comfy_api/v3/ui.py index 644bc4ca3..4094812e0 100644 --- a/comfy_api/v3/ui.py +++ b/comfy_api/v3/ui.py @@ -475,11 +475,12 @@ class PreviewVideo(_UIOutput): class PreviewUI3D(_UIOutput): - def __init__(self, values: list[SavedResult | dict], **kwargs): - self.values = values + def __init__(self, model_file, camera_info, **kwargs): + self.model_file = model_file + self.camera_info = camera_info def as_dict(self): - return {"result": self.values} + return {"result": [self.model_file, self.camera_info]} class PreviewText(_UIOutput): diff --git a/comfy_extras/v3/nodes_load_3d.py b/comfy_extras/v3/nodes_load_3d.py index af12b497a..0068da40f 100644 --- a/comfy_extras/v3/nodes_load_3d.py +++ b/comfy_extras/v3/nodes_load_3d.py @@ -148,7 +148,7 @@ class Preview3D(io.ComfyNode): @classmethod def execute(cls, model_file, camera_info=None): - return io.NodeOutput(ui=ui.PreviewUI3D([model_file, camera_info], cls=cls)) + return io.NodeOutput(ui=ui.PreviewUI3D(model_file, camera_info, cls=cls)) class Preview3DAnimation(io.ComfyNode): @@ -169,7 +169,7 @@ class Preview3DAnimation(io.ComfyNode): @classmethod def execute(cls, model_file, camera_info=None): - return io.NodeOutput(ui=ui.PreviewUI3D([model_file, camera_info], cls=cls)) + return io.NodeOutput(ui=ui.PreviewUI3D(model_file, camera_info, cls=cls)) NODES_LIST = [