v3 nodes (part a) (#9149)

Alexander Piskun
2025-08-22 05:05:36 +03:00
committed by GitHub
parent bc49106837
commit bab08f40d1
4 changed files with 239 additions and 155 deletions

View File

@@ -1,49 +1,63 @@
import torch
from typing_extensions import override
import comfy.model_management
import node_helpers
from comfy_api.latest import ComfyExtension, io
class TextEncodeAceStepAudio:
class TextEncodeAceStepAudio(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": {
"clip": ("CLIP", ),
"tags": ("STRING", {"multiline": True, "dynamicPrompts": True}),
"lyrics": ("STRING", {"multiline": True, "dynamicPrompts": True}),
"lyrics_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "encode"
def define_schema(cls):
return io.Schema(
node_id="TextEncodeAceStepAudio",
category="conditioning",
inputs=[
io.Clip.Input("clip"),
io.String.Input("tags", multiline=True, dynamic_prompts=True),
io.String.Input("lyrics", multiline=True, dynamic_prompts=True),
io.Float.Input("lyrics_strength", default=1.0, min=0.0, max=10.0, step=0.01),
],
outputs=[io.Conditioning.Output()],
)
CATEGORY = "conditioning"
def encode(self, clip, tags, lyrics, lyrics_strength):
@classmethod
def execute(cls, clip, tags, lyrics, lyrics_strength) -> io.NodeOutput:
tokens = clip.tokenize(tags, lyrics=lyrics)
conditioning = clip.encode_from_tokens_scheduled(tokens)
conditioning = node_helpers.conditioning_set_values(conditioning, {"lyrics_strength": lyrics_strength})
return (conditioning, )
return io.NodeOutput(conditioning)
class EmptyAceStepLatentAudio:
def __init__(self):
self.device = comfy.model_management.intermediate_device()
class EmptyAceStepLatentAudio(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="EmptyAceStepLatentAudio",
category="latent/audio",
inputs=[
io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
io.Int.Input(
"batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."
),
],
outputs=[io.Latent.Output()],
)
@classmethod
def INPUT_TYPES(s):
return {"required": {"seconds": ("FLOAT", {"default": 120.0, "min": 1.0, "max": 1000.0, "step": 0.1}),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "generate"
CATEGORY = "latent/audio"
def generate(self, seconds, batch_size):
def execute(cls, seconds, batch_size) -> io.NodeOutput:
length = int(seconds * 44100 / 512 / 8)
latent = torch.zeros([batch_size, 8, 16, length], device=self.device)
return ({"samples": latent, "type": "audio"}, )
latent = torch.zeros([batch_size, 8, 16, length], device=comfy.model_management.intermediate_device())
return io.NodeOutput({"samples": latent, "type": "audio"})
NODE_CLASS_MAPPINGS = {
"TextEncodeAceStepAudio": TextEncodeAceStepAudio,
"EmptyAceStepLatentAudio": EmptyAceStepLatentAudio,
}
class AceExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
TextEncodeAceStepAudio,
EmptyAceStepLatentAudio,
]
async def comfy_entrypoint() -> AceExtension:
return AceExtension()
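The file above establishes the migration pattern the rest of this commit repeats: INPUT_TYPES / RETURN_TYPES / FUNCTION / CATEGORY collapse into one declarative io.Schema, the instance method becomes a classmethod execute returning io.NodeOutput, and the module-level NODE_CLASS_MAPPINGS dict is replaced by a ComfyExtension plus an async comfy_entrypoint. A minimal sketch of that shape, using only the APIs visible in this diff (the node and extension names here are hypothetical, not part of the commit):

import torch
from typing_extensions import override

import comfy.model_management
from comfy_api.latest import ComfyExtension, io


class ExampleLatentAudio(io.ComfyNode):
    # Hypothetical node, condensed from EmptyAceStepLatentAudio above.
    @classmethod
    def define_schema(cls):
        # One declarative schema replaces INPUT_TYPES, RETURN_TYPES, FUNCTION and CATEGORY.
        return io.Schema(
            node_id="ExampleLatentAudio",
            category="latent/audio",
            inputs=[io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1)],
            outputs=[io.Latent.Output()],
        )

    @classmethod
    def execute(cls, seconds) -> io.NodeOutput:
        # Same length formula as in the node above; the default 120 s gives
        # int(120 * 44100 / 512 / 8) == 1291 latent frames.
        length = int(seconds * 44100 / 512 / 8)
        latent = torch.zeros([1, 8, 16, length], device=comfy.model_management.intermediate_device())
        return io.NodeOutput({"samples": latent, "type": "audio"})


class ExampleExtension(ComfyExtension):
    # Hypothetical extension wrapper; replaces a NODE_CLASS_MAPPINGS dict.
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [ExampleLatentAudio]


async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()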

View File

@@ -1,8 +1,13 @@
import numpy as np
import torch
from tqdm.auto import trange
from typing_extensions import override
import comfy.model_patcher
import comfy.samplers
import comfy.utils
import torch
import numpy as np
from tqdm.auto import trange
from comfy.k_diffusion.sampling import to_d
from comfy_api.latest import ComfyExtension, io
@torch.no_grad()
@@ -33,30 +38,29 @@ def sample_lcm_upscale(model, x, sigmas, extra_args=None, callback=None, disable
return x
class SamplerLCMUpscale:
upscale_methods = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"]
class SamplerLCMUpscale(io.ComfyNode):
UPSCALE_METHODS = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"]
@classmethod
def INPUT_TYPES(s):
return {"required":
{"scale_ratio": ("FLOAT", {"default": 1.0, "min": 0.1, "max": 20.0, "step": 0.01}),
"scale_steps": ("INT", {"default": -1, "min": -1, "max": 1000, "step": 1}),
"upscale_method": (s.upscale_methods,),
}
}
RETURN_TYPES = ("SAMPLER",)
CATEGORY = "sampling/custom_sampling/samplers"
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="SamplerLCMUpscale",
category="sampling/custom_sampling/samplers",
inputs=[
io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01),
io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1),
io.Combo.Input("upscale_method", options=cls.UPSCALE_METHODS),
],
outputs=[io.Sampler.Output()],
)
FUNCTION = "get_sampler"
def get_sampler(self, scale_ratio, scale_steps, upscale_method):
@classmethod
def execute(cls, scale_ratio, scale_steps, upscale_method) -> io.NodeOutput:
if scale_steps < 0:
scale_steps = None
sampler = comfy.samplers.KSAMPLER(sample_lcm_upscale, extra_options={"total_upscale": scale_ratio, "upscale_steps": scale_steps, "upscale_method": upscale_method})
return (sampler, )
return io.NodeOutput(sampler)
from comfy.k_diffusion.sampling import to_d
import comfy.model_patcher
@torch.no_grad()
def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
@@ -82,30 +86,36 @@ def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=No
return x
class SamplerEulerCFGpp:
class SamplerEulerCFGpp(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required":
{"version": (["regular", "alternative"],),}
}
RETURN_TYPES = ("SAMPLER",)
# CATEGORY = "sampling/custom_sampling/samplers"
CATEGORY = "_for_testing"
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="SamplerEulerCFGpp",
display_name="SamplerEulerCFG++",
category="_for_testing", # "sampling/custom_sampling/samplers"
inputs=[
io.Combo.Input("version", options=["regular", "alternative"]),
],
outputs=[io.Sampler.Output()],
is_experimental=True,
)
FUNCTION = "get_sampler"
def get_sampler(self, version):
@classmethod
def execute(cls, version) -> io.NodeOutput:
if version == "alternative":
sampler = comfy.samplers.KSAMPLER(sample_euler_pp)
else:
sampler = comfy.samplers.ksampler("euler_cfg_pp")
return (sampler, )
return io.NodeOutput(sampler)
NODE_CLASS_MAPPINGS = {
"SamplerLCMUpscale": SamplerLCMUpscale,
"SamplerEulerCFGpp": SamplerEulerCFGpp,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"SamplerEulerCFGpp": "SamplerEulerCFG++",
}
class AdvancedSamplersExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
SamplerLCMUpscale,
SamplerEulerCFGpp,
]
async def comfy_entrypoint() -> AdvancedSamplersExtension:
return AdvancedSamplersExtension()
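Both nodes in this file only wrap a bare sampling function in comfy.samplers.KSAMPLER and return it as a SAMPLER output. The bodies of sample_lcm_upscale and sample_euler_pp fall outside the hunks, so the following is a hedged sketch only: a custom sampler is a function with the (model, x, sigmas, extra_args, callback, disable) signature seen above; the model-call and callback conventions below follow the usual k-diffusion style and are assumptions, not shown in this diff.

import torch
from tqdm.auto import trange

import comfy.samplers


@torch.no_grad()
def sample_identity(model, x, sigmas, extra_args=None, callback=None, disable=None):
    # Hypothetical do-nothing sampler with the same signature as sample_lcm_upscale above.
    # It queries the model at each sigma and carries the denoised prediction forward.
    extra_args = {} if extra_args is None else extra_args
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
        x = denoised
    return x


# Wrapped the same way SamplerLCMUpscale.execute and SamplerEulerCFGpp.execute do above:
sampler = comfy.samplers.KSAMPLER(sample_identity)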

View File

@@ -1,4 +1,8 @@
import torch
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
def project(v0, v1):
v1 = torch.nn.functional.normalize(v1, dim=[-1, -2, -3])
@@ -6,22 +10,45 @@ def project(v0, v1):
v0_orthogonal = v0 - v0_parallel
return v0_parallel, v0_orthogonal
class APG:
class APG(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
"eta": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01, "tooltip": "Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1."}),
"norm_threshold": ("FLOAT", {"default": 5.0, "min": 0.0, "max": 50.0, "step": 0.1, "tooltip": "Normalize guidance vector to this value, normalization disable at a setting of 0."}),
"momentum": ("FLOAT", {"default": 0.0, "min": -5.0, "max": 1.0, "step": 0.01, "tooltip":"Controls a running average of guidance during diffusion, disabled at a setting of 0."}),
}
}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
CATEGORY = "sampling/custom_sampling"
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="APG",
display_name="Adaptive Projected Guidance",
category="sampling/custom_sampling",
inputs=[
io.Model.Input("model"),
io.Float.Input(
"eta",
default=1.0,
min=-10.0,
max=10.0,
step=0.01,
tooltip="Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1.",
),
io.Float.Input(
"norm_threshold",
default=5.0,
min=0.0,
max=50.0,
step=0.1,
tooltip="Normalize guidance vector to this value, normalization disable at a setting of 0.",
),
io.Float.Input(
"momentum",
default=0.0,
min=-5.0,
max=1.0,
step=0.01,
tooltip="Controls a running average of guidance during diffusion, disabled at a setting of 0.",
),
],
outputs=[io.Model.Output()],
)
def patch(self, model, eta, norm_threshold, momentum):
@classmethod
def execute(cls, model, eta, norm_threshold, momentum) -> io.NodeOutput:
running_avg = 0
prev_sigma = None
@@ -65,12 +92,15 @@ class APG:
m = model.clone()
m.set_model_sampler_pre_cfg_function(pre_cfg_function)
return (m,)
return io.NodeOutput(m)
NODE_CLASS_MAPPINGS = {
"APG": APG,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"APG": "Adaptive Projected Guidance",
}
class ApgExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
APG,
]
async def comfy_entrypoint() -> ApgExtension:
return ApgExtension()
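Most of project() lies outside the hunk, but the visible lines (a normalize over dims [-1, -2, -3], then v0_orthogonal = v0 - v0_parallel) imply the standard split of the guidance vector into components parallel and orthogonal to a reference prediction; per the tooltips above, execute() then scales the parallel part with eta, clips the guidance norm at norm_threshold, and keeps a running average controlled by momentum. A plain-tensor sketch of that split, independent of the node code:

import torch

v0 = torch.randn(2, 4, 8, 8)   # stand-in for the guidance vector
v1 = torch.randn(2, 4, 8, 8)   # stand-in for the reference (conditional) prediction

# Unit direction per batch element, matching the dim=[-1, -2, -3] normalize above.
v1_unit = torch.nn.functional.normalize(v1, dim=[-1, -2, -3])

# Parallel component is the projection onto that direction; the remainder is orthogonal.
v0_parallel = (v0 * v1_unit).sum(dim=[-1, -2, -3], keepdim=True) * v1_unit
v0_orthogonal = v0 - v0_parallel

# The two parts reassemble v0, and the orthogonal part has ~zero overlap with v1.
assert torch.allclose(v0_parallel + v0_orthogonal, v0, atol=1e-5)
assert (v0_orthogonal * v1_unit).sum(dim=[-1, -2, -3]).abs().max() < 1e-3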

View File

@@ -1,3 +1,7 @@
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
def attention_multiply(attn, model, q, k, v, out):
m = model.clone()
@@ -16,57 +20,71 @@ def attention_multiply(attn, model, q, k, v, out):
return m
class UNetSelfAttentionMultiply:
class UNetSelfAttentionMultiply(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"q": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"k": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"v": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"out": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="UNetSelfAttentionMultiply",
category="_for_testing/attention_experiments",
inputs=[
io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
],
outputs=[io.Model.Output()],
is_experimental=True,
)
CATEGORY = "_for_testing/attention_experiments"
def patch(self, model, q, k, v, out):
@classmethod
def execute(cls, model, q, k, v, out) -> io.NodeOutput:
m = attention_multiply("attn1", model, q, k, v, out)
return (m, )
return io.NodeOutput(m)
class UNetCrossAttentionMultiply:
class UNetCrossAttentionMultiply(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"q": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"k": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"v": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"out": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="UNetCrossAttentionMultiply",
category="_for_testing/attention_experiments",
inputs=[
io.Model.Input("model"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
],
outputs=[io.Model.Output()],
is_experimental=True,
)
CATEGORY = "_for_testing/attention_experiments"
def patch(self, model, q, k, v, out):
@classmethod
def execute(cls, model, q, k, v, out) -> io.NodeOutput:
m = attention_multiply("attn2", model, q, k, v, out)
return (m, )
return io.NodeOutput(m)
class CLIPAttentionMultiply:
class CLIPAttentionMultiply(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip": ("CLIP",),
"q": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"k": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"v": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"out": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("CLIP",)
FUNCTION = "patch"
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="CLIPAttentionMultiply",
category="_for_testing/attention_experiments",
inputs=[
io.Clip.Input("clip"),
io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
],
outputs=[io.Clip.Output()],
is_experimental=True,
)
CATEGORY = "_for_testing/attention_experiments"
def patch(self, clip, q, k, v, out):
@classmethod
def execute(cls, clip, q, k, v, out) -> io.NodeOutput:
m = clip.clone()
sd = m.patcher.model_state_dict()
@@ -79,23 +97,28 @@ class CLIPAttentionMultiply:
m.add_patches({key: (None,)}, 0.0, v)
if key.endswith("self_attn.out_proj.weight") or key.endswith("self_attn.out_proj.bias"):
m.add_patches({key: (None,)}, 0.0, out)
return (m, )
return io.NodeOutput(m)
class UNetTemporalAttentionMultiply:
class UNetTemporalAttentionMultiply(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"self_structural": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"self_temporal": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"cross_structural": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"cross_temporal": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
def define_schema(cls) -> io.Schema:
return io.Schema(
node_id="UNetTemporalAttentionMultiply",
category="_for_testing/attention_experiments",
inputs=[
io.Model.Input("model"),
io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("self_temporal", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("cross_structural", default=1.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("cross_temporal", default=1.0, min=0.0, max=10.0, step=0.01),
],
outputs=[io.Model.Output()],
is_experimental=True,
)
CATEGORY = "_for_testing/attention_experiments"
def patch(self, model, self_structural, self_temporal, cross_structural, cross_temporal):
@classmethod
def execute(cls, model, self_structural, self_temporal, cross_structural, cross_temporal) -> io.NodeOutput:
m = model.clone()
sd = model.model_state_dict()
@@ -110,11 +133,18 @@ class UNetTemporalAttentionMultiply:
m.add_patches({k: (None,)}, 0.0, cross_temporal)
else:
m.add_patches({k: (None,)}, 0.0, cross_structural)
return (m, )
return io.NodeOutput(m)
NODE_CLASS_MAPPINGS = {
"UNetSelfAttentionMultiply": UNetSelfAttentionMultiply,
"UNetCrossAttentionMultiply": UNetCrossAttentionMultiply,
"CLIPAttentionMultiply": CLIPAttentionMultiply,
"UNetTemporalAttentionMultiply": UNetTemporalAttentionMultiply,
}
class AttentionMultiplyExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
UNetSelfAttentionMultiply,
UNetCrossAttentionMultiply,
CLIPAttentionMultiply,
UNetTemporalAttentionMultiply,
]
async def comfy_entrypoint() -> AttentionMultiplyExtension:
return AttentionMultiplyExtension()
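All four nodes in this file share the same mechanics: clone the model or CLIP, walk its state dict, and rescale matching attention weights via add_patches. Condensed into one hypothetical node (only the self_attn.out_proj case from CLIPAttentionMultiply; the add_patches semantics in the comment are inferred from how the nodes above use it, not stated in this diff):

from comfy_api.latest import io


class CLIPOutProjMultiply(io.ComfyNode):
    # Hypothetical node, condensed from CLIPAttentionMultiply above.
    @classmethod
    def define_schema(cls) -> io.Schema:
        return io.Schema(
            node_id="CLIPOutProjMultiply",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Clip.Input("clip"),
                io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Clip.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, clip, out) -> io.NodeOutput:
        m = clip.clone()
        sd = m.patcher.model_state_dict()
        for key in sd:
            if key.endswith("self_attn.out_proj.weight") or key.endswith("self_attn.out_proj.bias"):
                # A (None,) patch with strength_patch=0.0 presumably adds no delta; the
                # existing weight is just rescaled by the last argument (here `out`).
                m.add_patches({key: (None,)}, 0.0, out)
        return io.NodeOutput(m)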