Merge pull request #8943 from bigcat88/v3/nodes/nodes_a

[V3] 4 more converted files (starting with A letter)

commit 3aceeab359
@@ -355,7 +355,7 @@ class Int(ComfyTypeIO):
                "max": self.max,
                "step": self.step,
                "control_after_generate": self.control_after_generate,
-                "display": self.display_mode.value if self.display_name else None,
+                "display": self.display_mode.value if self.display_mode else None,
            })

@comfytype(io_type="FLOAT")
comfy_extras/v3/nodes_advanced_samplers.py  (new file, 128 lines)
@@ -0,0 +1,128 @@
import numpy as np
import torch
from tqdm.auto import trange

import comfy.model_patcher
import comfy.samplers
import comfy.utils
from comfy.k_diffusion.sampling import to_d
from comfy_api.v3 import io


@torch.no_grad()
def sample_lcm_upscale(
    model, x, sigmas, extra_args=None, callback=None, disable=None, total_upscale=2.0, upscale_method="bislerp", upscale_steps=None
):
    extra_args = {} if extra_args is None else extra_args

    if upscale_steps is None:
        upscale_steps = max(len(sigmas) // 2 + 1, 2)
    else:
        upscale_steps += 1
        upscale_steps = min(upscale_steps, len(sigmas) + 1)

    upscales = np.linspace(1.0, total_upscale, upscale_steps)[1:]

    orig_shape = x.size()
    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        denoised = model(x, sigmas[i] * s_in, **extra_args)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})

        x = denoised
        if i < len(upscales):
            x = comfy.utils.common_upscale(
                x, round(orig_shape[-1] * upscales[i]), round(orig_shape[-2] * upscales[i]), upscale_method, "disabled"
            )

        if sigmas[i + 1] > 0:
            x += sigmas[i + 1] * torch.randn_like(x)
    return x


class SamplerLCMUpscale(io.ComfyNodeV3):
    UPSCALE_METHODS = ["bislerp", "nearest-exact", "bilinear", "area", "bicubic"]

    @classmethod
    def define_schema(cls) -> io.SchemaV3:
        return io.SchemaV3(
            node_id="SamplerLCMUpscale_V3",
            category="sampling/custom_sampling/samplers",
            inputs=[
                io.Float.Input("scale_ratio", default=1.0, min=0.1, max=20.0, step=0.01),
                io.Int.Input("scale_steps", default=-1, min=-1, max=1000, step=1),
                io.Combo.Input("upscale_method", options=cls.UPSCALE_METHODS),
            ],
            outputs=[io.Sampler.Output()],
        )

    @classmethod
    def execute(cls, scale_ratio, scale_steps, upscale_method) -> io.NodeOutput:
        if scale_steps < 0:
            scale_steps = None
        sampler = comfy.samplers.KSAMPLER(
            sample_lcm_upscale,
            extra_options={
                "total_upscale": scale_ratio,
                "upscale_steps": scale_steps,
                "upscale_method": upscale_method,
            },
        )
        return io.NodeOutput(sampler)


@torch.no_grad()
def sample_euler_pp(model, x, sigmas, extra_args=None, callback=None, disable=None):
    extra_args = {} if extra_args is None else extra_args

    temp = [0]

    def post_cfg_function(args):
        temp[0] = args["uncond_denoised"]
        return args["denoised"]

    model_options = extra_args.get("model_options", {}).copy()
    extra_args["model_options"] = comfy.model_patcher.set_model_options_post_cfg_function(
        model_options, post_cfg_function, disable_cfg1_optimization=True
    )

    s_in = x.new_ones([x.shape[0]])
    for i in trange(len(sigmas) - 1, disable=disable):
        sigma_hat = sigmas[i]
        denoised = model(x, sigma_hat * s_in, **extra_args)
        d = to_d(x - denoised + temp[0], sigmas[i], denoised)
        if callback is not None:
            callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
        dt = sigmas[i + 1] - sigma_hat
        x = x + d * dt
    return x


class SamplerEulerCFGpp(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls) -> io.SchemaV3:
        return io.SchemaV3(
            node_id="SamplerEulerCFGpp_V3",
            display_name="SamplerEulerCFG++ _V3",
            category="_for_testing",
            inputs=[
                io.Combo.Input("version", options=["regular", "alternative"]),
            ],
            outputs=[io.Sampler.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, version) -> io.NodeOutput:
        if version == "alternative":
            sampler = comfy.samplers.KSAMPLER(sample_euler_pp)
        else:
            sampler = comfy.samplers.ksampler("euler_cfg_pp")
        return io.NodeOutput(sampler)


NODES_LIST = [
    SamplerLCMUpscale,
    SamplerEulerCFGpp,
]
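Note on the LCM upscale schedule above: with upscale_steps left at its default of None, the sampler upscales the latent along a linear ramp over roughly the first half of the steps. A standalone sketch of just that arithmetic (plain NumPy; the 9-entry sigma schedule and the 2.0 total upscale are assumed values for illustration):

import numpy as np

sigmas_len = 9        # assumed: a 9-entry sigma schedule, i.e. 8 sampling steps
total_upscale = 2.0   # the function's default target scale

upscale_steps = max(sigmas_len // 2 + 1, 2)                      # -> 5
upscales = np.linspace(1.0, total_upscale, upscale_steps)[1:]    # -> [1.25, 1.5, 1.75, 2.0]
print(upscales)

So the latent is re-upscaled after each of the first four steps and then stays at the target resolution for the remaining ones.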
comfy_extras/v3/nodes_align_your_steps.py  (new file, 83 lines)
@@ -0,0 +1,83 @@
# from: https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html

import numpy as np
import torch

from comfy_api.v3 import io

NOISE_LEVELS = {
    "SD1": [
        14.6146412293,
        6.4745760956,
        3.8636745985,
        2.6946151520,
        1.8841921177,
        1.3943805092,
        0.9642583904,
        0.6523686016,
        0.3977456272,
        0.1515232662,
        0.0291671582,
    ],
    "SDXL": [
        14.6146412293,
        6.3184485287,
        3.7681790315,
        2.1811480769,
        1.3405244945,
        0.8620721141,
        0.5550693289,
        0.3798540708,
        0.2332364134,
        0.1114188177,
        0.0291671582,
    ],
    "SVD": [700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002],
}


def loglinear_interp(t_steps, num_steps):
    """Performs log-linear interpolation of a given array of decreasing numbers."""
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])

    new_xs = np.linspace(0, 1, num_steps)
    new_ys = np.interp(new_xs, xs, ys)

    return np.exp(new_ys)[::-1].copy()


class AlignYourStepsScheduler(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls) -> io.SchemaV3:
        return io.SchemaV3(
            node_id="AlignYourStepsScheduler_V3",
            category="sampling/custom_sampling/schedulers",
            inputs=[
                io.Combo.Input("model_type", options=["SD1", "SDXL", "SVD"]),
                io.Int.Input("steps", default=10, min=1, max=10000),
                io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
            ],
            outputs=[io.Sigmas.Output()],
        )

    @classmethod
    def execute(cls, model_type, steps, denoise) -> io.NodeOutput:
        total_steps = steps
        if denoise < 1.0:
            if denoise <= 0.0:
                return io.NodeOutput(torch.FloatTensor([]))
            total_steps = round(steps * denoise)

        sigmas = NOISE_LEVELS[model_type][:]
        if (steps + 1) != len(sigmas):
            sigmas = loglinear_interp(sigmas, steps + 1)

        sigmas = sigmas[-(total_steps + 1):]
        sigmas[-1] = 0
        return io.NodeOutput(torch.FloatTensor(sigmas))


NODES_LIST = [
    AlignYourStepsScheduler,
]
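For a quick sanity check of the scheduler logic outside ComfyUI, loglinear_interp and the SD1 table from the file above can be exercised directly; the 20-step count below is an arbitrary choice for illustration:

import numpy as np

def loglinear_interp(t_steps, num_steps):
    # same helper as in nodes_align_your_steps.py above
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])
    new_ys = np.interp(np.linspace(0, 1, num_steps), xs, ys)
    return np.exp(new_ys)[::-1].copy()

sd1 = [14.6146412293, 6.4745760956, 3.8636745985, 2.6946151520, 1.8841921177,
       1.3943805092, 0.9642583904, 0.6523686016, 0.3977456272, 0.1515232662, 0.0291671582]

sigmas = loglinear_interp(sd1, 20 + 1)   # 20 steps need 21 sigma values
sigmas[-1] = 0                           # the node zeroes the final sigma before returning
print(len(sigmas), sigmas[0], sigmas[-1])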
comfy_extras/v3/nodes_apg.py  (new file, 98 lines)
@@ -0,0 +1,98 @@
import torch

from comfy_api.v3 import io


def project(v0, v1):
    v1 = torch.nn.functional.normalize(v1, dim=[-1, -2, -3])
    v0_parallel = (v0 * v1).sum(dim=[-1, -2, -3], keepdim=True) * v1
    v0_orthogonal = v0 - v0_parallel
    return v0_parallel, v0_orthogonal


class APG(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls) -> io.SchemaV3:
        return io.SchemaV3(
            node_id="APG_V3",
            display_name="Adaptive Projected Guidance _V3",
            category="sampling/custom_sampling",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input(
                    "eta",
                    default=1.0,
                    min=-10.0,
                    max=10.0,
                    step=0.01,
                    tooltip="Controls the scale of the parallel guidance vector. Default CFG behavior at a setting of 1.",
                ),
                io.Float.Input(
                    "norm_threshold",
                    default=5.0,
                    min=0.0,
                    max=50.0,
                    step=0.1,
                    tooltip="Normalize guidance vector to this value, normalization disable at a setting of 0.",
                ),
                io.Float.Input(
                    "momentum",
                    default=0.0,
                    min=-5.0,
                    max=1.0,
                    step=0.01,
                    tooltip="Controls a running average of guidance during diffusion, disabled at a setting of 0.",
                ),
            ],
            outputs=[io.Model.Output()],
        )

    @classmethod
    def execute(cls, model, eta, norm_threshold, momentum) -> io.NodeOutput:
        running_avg = 0
        prev_sigma = None

        def pre_cfg_function(args):
            nonlocal running_avg, prev_sigma

            if len(args["conds_out"]) == 1:
                return args["conds_out"]

            cond = args["conds_out"][0]
            uncond = args["conds_out"][1]
            sigma = args["sigma"][0]
            cond_scale = args["cond_scale"]

            if prev_sigma is not None and sigma > prev_sigma:
                running_avg = 0
            prev_sigma = sigma

            guidance = cond - uncond

            if momentum != 0:
                if not torch.is_tensor(running_avg):
                    running_avg = guidance
                else:
                    running_avg = momentum * running_avg + guidance
                guidance = running_avg

            if norm_threshold > 0:
                guidance_norm = guidance.norm(p=2, dim=[-1, -2, -3], keepdim=True)
                scale = torch.minimum(torch.ones_like(guidance_norm), norm_threshold / guidance_norm)
                guidance = guidance * scale

            guidance_parallel, guidance_orthogonal = project(guidance, cond)
            modified_guidance = guidance_orthogonal + eta * guidance_parallel

            modified_cond = (uncond + modified_guidance) + (cond - uncond) / cond_scale

            return [modified_cond, uncond] + args["conds_out"][2:]

        m = model.clone()
        m.set_model_sampler_pre_cfg_function(pre_cfg_function)
        return io.NodeOutput(m)


NODES_LIST = [
    APG,
]
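The project() helper above splits the guidance vector into components parallel and orthogonal to the (normalized) conditional prediction, and eta then rescales only the parallel part. A quick check that the decomposition behaves as expected, using random tensors with an assumed (batch, channels, height, width) layout:

import torch

def project(v0, v1):
    # same decomposition as in nodes_apg.py above
    v1 = torch.nn.functional.normalize(v1, dim=[-1, -2, -3])
    v0_parallel = (v0 * v1).sum(dim=[-1, -2, -3], keepdim=True) * v1
    v0_orthogonal = v0 - v0_parallel
    return v0_parallel, v0_orthogonal

guidance = torch.randn(1, 4, 8, 8)
cond = torch.randn(1, 4, 8, 8)

par, orth = project(guidance, cond)
print(torch.allclose(par + orth, guidance, atol=1e-6))   # the parts sum back to the input
print((par * orth).sum().item())                          # ~0: the parts are orthogonal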
comfy_extras/v3/nodes_attention_multiply.py  (new file, 139 lines)
@@ -0,0 +1,139 @@
from comfy_api.v3 import io


def attention_multiply(attn, model, q, k, v, out):
    m = model.clone()
    sd = model.model_state_dict()

    for key in sd:
        if key.endswith("{}.to_q.bias".format(attn)) or key.endswith("{}.to_q.weight".format(attn)):
            m.add_patches({key: (None,)}, 0.0, q)
        if key.endswith("{}.to_k.bias".format(attn)) or key.endswith("{}.to_k.weight".format(attn)):
            m.add_patches({key: (None,)}, 0.0, k)
        if key.endswith("{}.to_v.bias".format(attn)) or key.endswith("{}.to_v.weight".format(attn)):
            m.add_patches({key: (None,)}, 0.0, v)
        if key.endswith("{}.to_out.0.bias".format(attn)) or key.endswith("{}.to_out.0.weight".format(attn)):
            m.add_patches({key: (None,)}, 0.0, out)
    return m


class UNetSelfAttentionMultiply(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls) -> io.SchemaV3:
        return io.SchemaV3(
            node_id="UNetSelfAttentionMultiply_V3",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Model.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, q, k, v, out) -> io.NodeOutput:
        return io.NodeOutput(attention_multiply("attn1", model, q, k, v, out))


class UNetCrossAttentionMultiply(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls) -> io.SchemaV3:
        return io.SchemaV3(
            node_id="UNetCrossAttentionMultiply_V3",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Model.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, q, k, v, out) -> io.NodeOutput:
        return io.NodeOutput(attention_multiply("attn2", model, q, k, v, out))


class CLIPAttentionMultiply(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls) -> io.SchemaV3:
        return io.SchemaV3(
            node_id="CLIPAttentionMultiply_V3",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Clip.Input("clip"),
                io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("k", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("v", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("out", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Clip.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, clip, q, k, v, out) -> io.NodeOutput:
        m = clip.clone()
        sd = m.patcher.model_state_dict()

        for key in sd:
            if key.endswith("self_attn.q_proj.weight") or key.endswith("self_attn.q_proj.bias"):
                m.add_patches({key: (None,)}, 0.0, q)
            if key.endswith("self_attn.k_proj.weight") or key.endswith("self_attn.k_proj.bias"):
                m.add_patches({key: (None,)}, 0.0, k)
            if key.endswith("self_attn.v_proj.weight") or key.endswith("self_attn.v_proj.bias"):
                m.add_patches({key: (None,)}, 0.0, v)
            if key.endswith("self_attn.out_proj.weight") or key.endswith("self_attn.out_proj.bias"):
                m.add_patches({key: (None,)}, 0.0, out)
        return io.NodeOutput(m)


class UNetTemporalAttentionMultiply(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls) -> io.SchemaV3:
        return io.SchemaV3(
            node_id="UNetTemporalAttentionMultiply_V3",
            category="_for_testing/attention_experiments",
            inputs=[
                io.Model.Input("model"),
                io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("self_temporal", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("cross_structural", default=1.0, min=0.0, max=10.0, step=0.01),
                io.Float.Input("cross_temporal", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Model.Output()],
            is_experimental=True,
        )

    @classmethod
    def execute(cls, model, self_structural, self_temporal, cross_structural, cross_temporal) -> io.NodeOutput:
        m = model.clone()
        sd = model.model_state_dict()

        for k in sd:
            if (k.endswith("attn1.to_out.0.bias") or k.endswith("attn1.to_out.0.weight")):
                if '.time_stack.' in k:
                    m.add_patches({k: (None,)}, 0.0, self_temporal)
                else:
                    m.add_patches({k: (None,)}, 0.0, self_structural)
            elif (k.endswith("attn2.to_out.0.bias") or k.endswith("attn2.to_out.0.weight")):
                if '.time_stack.' in k:
                    m.add_patches({k: (None,)}, 0.0, cross_temporal)
                else:
                    m.add_patches({k: (None,)}, 0.0, cross_structural)
        return io.NodeOutput(m)


NODES_LIST = [
    UNetSelfAttentionMultiply,
    UNetCrossAttentionMultiply,
    CLIPAttentionMultiply,
    UNetTemporalAttentionMultiply,
]
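A note on how attention_multiply() selects weights: it matches state-dict keys purely by suffix, so the same helper covers attn1 (self-attention) and attn2 (cross-attention), and the (None,) patch at strength 0.0 contributes nothing, which as far as I can tell makes the final argument act as a plain multiplier on the existing weight. The key names below are hypothetical examples in the usual SD UNet layout, purely to illustrate the suffix matching:

# hypothetical key names for illustration; real ones come from model.model_state_dict()
keys = [
    "diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight",
    "diffusion_model.middle_block.1.transformer_blocks.0.attn2.to_k.weight",
    "diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_out.0.bias",
]

attn = "attn1"
for key in keys:
    if key.endswith("{}.to_q.weight".format(attn)) or key.endswith("{}.to_q.bias".format(attn)):
        print(key, "-> scaled by q")
    elif key.endswith("{}.to_out.0.weight".format(attn)) or key.endswith("{}.to_out.0.bias".format(attn)):
        print(key, "-> scaled by out")
    else:
        print(key, "-> untouched")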
@@ -16,7 +16,7 @@ from comfy.cli_args import args
from comfy_api.v3 import io, ui


-class ConditioningStableAudio_V3(io.ComfyNodeV3):
+class ConditioningStableAudio(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -46,7 +46,7 @@ class ConditioningStableAudio_V3(io.ComfyNodeV3):
        )


-class EmptyLatentAudio_V3(io.ComfyNodeV3):
+class EmptyLatentAudio(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -68,7 +68,7 @@ class EmptyLatentAudio_V3(io.ComfyNodeV3):
        return io.NodeOutput({"samples": latent, "type": "audio"})


-class LoadAudio_V3(io.ComfyNodeV3):
+class LoadAudio(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -106,7 +106,7 @@ class LoadAudio_V3(io.ComfyNodeV3):
        return True


-class PreviewAudio_V3(io.ComfyNodeV3):
+class PreviewAudio(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -125,7 +125,7 @@ class PreviewAudio_V3(io.ComfyNodeV3):
        return io.NodeOutput(ui=ui.PreviewAudio(audio, cls=cls))


-class SaveAudioMP3_V3(io.ComfyNodeV3):
+class SaveAudioMP3(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -146,7 +146,7 @@ class SaveAudioMP3_V3(io.ComfyNodeV3):
        return _save_audio(self, audio, filename_prefix, format, quality)


-class SaveAudioOpus_V3(io.ComfyNodeV3):
+class SaveAudioOpus(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -167,7 +167,7 @@ class SaveAudioOpus_V3(io.ComfyNodeV3):
        return _save_audio(self, audio, filename_prefix, format, quality)


-class SaveAudio_V3(io.ComfyNodeV3):
+class SaveAudio(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -187,7 +187,7 @@ class SaveAudio_V3(io.ComfyNodeV3):
        return _save_audio(cls, audio, filename_prefix, format)


-class VAEDecodeAudio_V3(io.ComfyNodeV3):
+class VAEDecodeAudio(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -209,7 +209,7 @@ class VAEDecodeAudio_V3(io.ComfyNodeV3):
        return io.NodeOutput({"waveform": audio, "sample_rate": 44100})


-class VAEEncodeAudio_V3(io.ComfyNodeV3):
+class VAEEncodeAudio(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -335,13 +335,13 @@ def _save_audio(cls, audio, filename_prefix="ComfyUI", format="flac", quality="1


NODES_LIST: list[type[io.ComfyNodeV3]] = [
-    ConditioningStableAudio_V3,
-    EmptyLatentAudio_V3,
-    LoadAudio_V3,
-    PreviewAudio_V3,
-    SaveAudioMP3_V3,
-    SaveAudioOpus_V3,
-    SaveAudio_V3,
-    VAEDecodeAudio_V3,
-    VAEEncodeAudio_V3,
+    ConditioningStableAudio,
+    EmptyLatentAudio,
+    LoadAudio,
+    PreviewAudio,
+    SaveAudioMP3,
+    SaveAudioOpus,
+    SaveAudio,
+    VAEDecodeAudio,
+    VAEEncodeAudio,
]
@@ -3,7 +3,7 @@ from comfy.cldm.control_types import UNION_CONTROLNET_TYPES
from comfy_api.v3 import io


-class ControlNetApplyAdvanced_V3(io.ComfyNodeV3):
+class ControlNetApplyAdvanced(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -60,7 +60,7 @@ class ControlNetApplyAdvanced_V3(io.ComfyNodeV3):
        return io.NodeOutput(out[0], out[1])


-class SetUnionControlNetType_V3(io.ComfyNodeV3):
+class SetUnionControlNetType(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -87,7 +87,7 @@ class SetUnionControlNetType_V3(io.ComfyNodeV3):
        return io.NodeOutput(control_net)


-class ControlNetInpaintingAliMamaApply_V3(ControlNetApplyAdvanced_V3):
+class ControlNetInpaintingAliMamaApply(ControlNetApplyAdvanced):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -135,7 +135,7 @@ class ControlNetInpaintingAliMamaApply_V3(ControlNetApplyAdvanced_V3):


NODES_LIST: list[type[io.ComfyNodeV3]] = [
-    ControlNetApplyAdvanced_V3,
-    SetUnionControlNetType_V3,
-    ControlNetInpaintingAliMamaApply_V3,
+    ControlNetApplyAdvanced,
+    SetUnionControlNetType,
+    ControlNetInpaintingAliMamaApply,
]
@@ -5,7 +5,7 @@ import sys
from comfy_api.v3 import io


-class String_V3(io.ComfyNodeV3):
+class String(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -23,7 +23,7 @@ class String_V3(io.ComfyNodeV3):
        return io.NodeOutput(value)


-class StringMultiline_V3(io.ComfyNodeV3):
+class StringMultiline(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -41,7 +41,7 @@ class StringMultiline_V3(io.ComfyNodeV3):
        return io.NodeOutput(value)


-class Int_V3(io.ComfyNodeV3):
+class Int(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -59,7 +59,7 @@ class Int_V3(io.ComfyNodeV3):
        return io.NodeOutput(value)


-class Float_V3(io.ComfyNodeV3):
+class Float(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -77,7 +77,7 @@ class Float_V3(io.ComfyNodeV3):
        return io.NodeOutput(value)


-class Boolean_V3(io.ComfyNodeV3):
+class Boolean(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -96,9 +96,9 @@ class Boolean_V3(io.ComfyNodeV3):


NODES_LIST: list[type[io.ComfyNodeV3]] = [
-    String_V3,
-    StringMultiline_V3,
-    Int_V3,
-    Float_V3,
-    Boolean_V3,
+    String,
+    StringMultiline,
+    Int,
+    Float,
+    Boolean,
]
@@ -23,7 +23,7 @@ import nodes
from comfy_api.v3 import io


-class StableCascade_EmptyLatentImage_V3(io.ComfyNodeV3):
+class StableCascade_EmptyLatentImage(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -48,7 +48,7 @@ class StableCascade_EmptyLatentImage_V3(io.ComfyNodeV3):
        return io.NodeOutput({"samples": c_latent}, {"samples": b_latent})


-class StableCascade_StageC_VAEEncode_V3(io.ComfyNodeV3):
+class StableCascade_StageC_VAEEncode(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -79,7 +79,7 @@ class StableCascade_StageC_VAEEncode_V3(io.ComfyNodeV3):
        return io.NodeOutput({"samples": c_latent}, {"samples": b_latent})


-class StableCascade_StageB_Conditioning_V3(io.ComfyNodeV3):
+class StableCascade_StageB_Conditioning(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -105,7 +105,7 @@ class StableCascade_StageB_Conditioning_V3(io.ComfyNodeV3):
        return io.NodeOutput(c)


-class StableCascade_SuperResolutionControlnet_V3(io.ComfyNodeV3):
+class StableCascade_SuperResolutionControlnet(io.ComfyNodeV3):
    @classmethod
    def define_schema(cls):
        return io.SchemaV3(
@@ -136,8 +136,8 @@ class StableCascade_SuperResolutionControlnet_V3(io.ComfyNodeV3):


NODES_LIST: list[type[io.ComfyNodeV3]] = [
-    StableCascade_EmptyLatentImage_V3,
-    StableCascade_StageB_Conditioning_V3,
-    StableCascade_StageC_VAEEncode_V3,
-    StableCascade_SuperResolutionControlnet_V3,
+    StableCascade_EmptyLatentImage,
+    StableCascade_StageB_Conditioning,
+    StableCascade_StageC_VAEEncode,
+    StableCascade_SuperResolutionControlnet,
]
nodes.py  (4 additions)
@@ -2300,7 +2300,11 @@ def init_builtin_extra_nodes():
        "nodes_v3_test.py",
        "nodes_v1_test.py",
        "v3/nodes_ace.py",
+        "v3/nodes_advanced_samplers.py",
+        "v3/nodes_align_your_steps.py",
        "v3/nodes_audio.py",
+        "v3/nodes_apg.py",
+        "v3/nodes_attention_multiply.py",
        "v3/nodes_controlnet.py",
        "v3/nodes_images.py",
        "v3/nodes_mask.py",
@@ -24,7 +24,7 @@ lint.select = [
    "F",
]
exclude = ["*.ipynb"]
-line-length = 120
+line-length = 144
lint.pycodestyle.ignore-overlong-task-comments = true

[tool.ruff.lint.per-file-ignores]