pass "id" in Schema inputs as a positional arg instead of a kwarg

This commit is contained in:
bigcat88 2025-07-24 22:03:50 +03:00
parent 66cd5152fd
commit f569823738
No known key found for this signature in database
GPG Key ID: 1F0BF0EC3CF22721
17 changed files with 154 additions and 154 deletions

View File

@ -19,14 +19,14 @@ class ConditioningStableAudio(io.ComfyNode):
node_id="ConditioningStableAudio_V3", node_id="ConditioningStableAudio_V3",
category="conditioning", category="conditioning",
inputs=[ inputs=[
io.Conditioning.Input(id="positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input(id="negative"), io.Conditioning.Input("negative"),
io.Float.Input(id="seconds_start", default=0.0, min=0.0, max=1000.0, step=0.1), io.Float.Input("seconds_start", default=0.0, min=0.0, max=1000.0, step=0.1),
io.Float.Input(id="seconds_total", default=47.0, min=0.0, max=1000.0, step=0.1), io.Float.Input("seconds_total", default=47.0, min=0.0, max=1000.0, step=0.1),
], ],
outputs=[ outputs=[
io.Conditioning.Output(id="positive_out", display_name="positive"), io.Conditioning.Output(display_name="positive"),
io.Conditioning.Output(id="negative_out", display_name="negative"), io.Conditioning.Output(display_name="negative"),
], ],
) )
@ -49,7 +49,7 @@ class EmptyLatentAudio(io.ComfyNode):
node_id="EmptyLatentAudio_V3", node_id="EmptyLatentAudio_V3",
category="latent/audio", category="latent/audio",
inputs=[ inputs=[
io.Float.Input(id="seconds", default=47.6, min=1.0, max=1000.0, step=0.1), io.Float.Input("seconds", default=47.6, min=1.0, max=1000.0, step=0.1),
io.Int.Input( io.Int.Input(
id="batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch." id="batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."
), ),
@ -200,8 +200,8 @@ class VAEDecodeAudio(io.ComfyNode):
node_id="VAEDecodeAudio_V3", node_id="VAEDecodeAudio_V3",
category="latent/audio", category="latent/audio",
inputs=[ inputs=[
io.Latent.Input(id="samples"), io.Latent.Input("samples"),
io.Vae.Input(id="vae"), io.Vae.Input("vae"),
], ],
outputs=[io.Audio.Output()], outputs=[io.Audio.Output()],
) )
@ -222,8 +222,8 @@ class VAEEncodeAudio(io.ComfyNode):
node_id="VAEEncodeAudio_V3", node_id="VAEEncodeAudio_V3",
category="latent/audio", category="latent/audio",
inputs=[ inputs=[
io.Audio.Input(id="audio"), io.Audio.Input("audio"),
io.Vae.Input(id="vae"), io.Vae.Input("vae"),
], ],
outputs=[io.Latent.Output()], outputs=[io.Latent.Output()],
) )

View File

@ -13,7 +13,7 @@ class DifferentialDiffusion(io.ComfyNode):
display_name="Differential Diffusion _V3", display_name="Differential Diffusion _V3",
category="_for_testing", category="_for_testing",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
], ],
outputs=[ outputs=[
io.Model.Output(), io.Model.Output(),

View File

@ -32,10 +32,10 @@ class CLIPTextEncodeFlux(io.ComfyNode):
node_id="CLIPTextEncodeFlux_V3", node_id="CLIPTextEncodeFlux_V3",
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
inputs=[ inputs=[
io.Clip.Input(id="clip"), io.Clip.Input("clip"),
io.String.Input(id="clip_l", multiline=True, dynamic_prompts=True), io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
io.String.Input(id="t5xxl", multiline=True, dynamic_prompts=True), io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
io.Float.Input(id="guidance", default=3.5, min=0.0, max=100.0, step=0.1), io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
], ],
outputs=[ outputs=[
io.Conditioning.Output(), io.Conditioning.Output(),
@ -58,7 +58,7 @@ class FluxDisableGuidance(io.ComfyNode):
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
description="This node completely disables the guidance embed on Flux and Flux like models", description="This node completely disables the guidance embed on Flux and Flux like models",
inputs=[ inputs=[
io.Conditioning.Input(id="conditioning"), io.Conditioning.Input("conditioning"),
], ],
outputs=[ outputs=[
io.Conditioning.Output(), io.Conditioning.Output(),
@ -78,8 +78,8 @@ class FluxGuidance(io.ComfyNode):
node_id="FluxGuidance_V3", node_id="FluxGuidance_V3",
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
inputs=[ inputs=[
io.Conditioning.Input(id="conditioning"), io.Conditioning.Input("conditioning"),
io.Float.Input(id="guidance", default=3.5, min=0.0, max=100.0, step=0.1), io.Float.Input("guidance", default=3.5, min=0.0, max=100.0, step=0.1),
], ],
outputs=[ outputs=[
io.Conditioning.Output(), io.Conditioning.Output(),
@ -100,7 +100,7 @@ class FluxKontextImageScale(io.ComfyNode):
category="advanced/conditioning/flux", category="advanced/conditioning/flux",
description="This node resizes the image to one that is more optimal for flux kontext.", description="This node resizes the image to one that is more optimal for flux kontext.",
inputs=[ inputs=[
io.Image.Input(id="image"), io.Image.Input("image"),
], ],
outputs=[ outputs=[
io.Image.Output(), io.Image.Output(),

View File

@ -35,11 +35,11 @@ class FreeU(io.ComfyNode):
node_id="FreeU_V3", node_id="FreeU_V3",
category="model_patches/unet", category="model_patches/unet",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.Float.Input(id="b1", default=1.1, min=0.0, max=10.0, step=0.01), io.Float.Input("b1", default=1.1, min=0.0, max=10.0, step=0.01),
io.Float.Input(id="b2", default=1.2, min=0.0, max=10.0, step=0.01), io.Float.Input("b2", default=1.2, min=0.0, max=10.0, step=0.01),
io.Float.Input(id="s1", default=0.9, min=0.0, max=10.0, step=0.01), io.Float.Input("s1", default=0.9, min=0.0, max=10.0, step=0.01),
io.Float.Input(id="s2", default=0.2, min=0.0, max=10.0, step=0.01), io.Float.Input("s2", default=0.2, min=0.0, max=10.0, step=0.01),
], ],
outputs=[ outputs=[
io.Model.Output(), io.Model.Output(),
@ -80,11 +80,11 @@ class FreeU_V2(io.ComfyNode):
node_id="FreeU_V2_V3", node_id="FreeU_V2_V3",
category="model_patches/unet", category="model_patches/unet",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.Float.Input(id="b1", default=1.3, min=0.0, max=10.0, step=0.01), io.Float.Input("b1", default=1.3, min=0.0, max=10.0, step=0.01),
io.Float.Input(id="b2", default=1.4, min=0.0, max=10.0, step=0.01), io.Float.Input("b2", default=1.4, min=0.0, max=10.0, step=0.01),
io.Float.Input(id="s1", default=0.9, min=0.0, max=10.0, step=0.01), io.Float.Input("s1", default=0.9, min=0.0, max=10.0, step=0.01),
io.Float.Input(id="s2", default=0.2, min=0.0, max=10.0, step=0.01), io.Float.Input("s2", default=0.2, min=0.0, max=10.0, step=0.01),
], ],
outputs=[ outputs=[
io.Model.Output(), io.Model.Output(),

View File

@ -65,12 +65,12 @@ class FreSca(io.ComfyNode):
category="_for_testing", category="_for_testing",
description="Applies frequency-dependent scaling to the guidance", description="Applies frequency-dependent scaling to the guidance",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.Float.Input(id="scale_low", default=1.0, min=0, max=10, step=0.01, io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,
tooltip="Scaling factor for low-frequency components"), tooltip="Scaling factor for low-frequency components"),
io.Float.Input(id="scale_high", default=1.25, min=0, max=10, step=0.01, io.Float.Input("scale_high", default=1.25, min=0, max=10, step=0.01,
tooltip="Scaling factor for high-frequency components"), tooltip="Scaling factor for high-frequency components"),
io.Int.Input(id="freq_cutoff", default=20, min=1, max=10000, step=1, io.Int.Input("freq_cutoff", default=20, min=1, max=10000, step=1,
tooltip="Number of frequency indices around center to consider as low-frequency"), tooltip="Number of frequency indices around center to consider as low-frequency"),
], ],
outputs=[ outputs=[

View File

@ -343,9 +343,9 @@ class GITSScheduler(io.ComfyNode):
node_id="GITSScheduler_V3", node_id="GITSScheduler_V3",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
inputs=[ inputs=[
io.Float.Input(id="coeff", default=1.20, min=0.80, max=1.50, step=0.05), io.Float.Input("coeff", default=1.20, min=0.80, max=1.50, step=0.05),
io.Int.Input(id="steps", default=10, min=2, max=1000), io.Int.Input("steps", default=10, min=2, max=1000),
io.Float.Input(id="denoise", default=1.0, min=0.0, max=1.0, step=0.01), io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
], ],
outputs=[ outputs=[
io.Sigmas.Output(), io.Sigmas.Output(),

View File

@ -33,11 +33,11 @@ class HyperTile(io.ComfyNode):
node_id="HyperTile_V3", node_id="HyperTile_V3",
category="model_patches/unet", category="model_patches/unet",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.Int.Input(id="tile_size", default=256, min=1, max=2048), io.Int.Input("tile_size", default=256, min=1, max=2048),
io.Int.Input(id="swap_size", default=2, min=1, max=128), io.Int.Input("swap_size", default=2, min=1, max=128),
io.Int.Input(id="max_depth", default=0, min=0, max=10), io.Int.Input("max_depth", default=0, min=0, max=10),
io.Boolean.Input(id="scale_depth", default=False), io.Boolean.Input("scale_depth", default=False),
], ],
outputs=[ outputs=[
io.Model.Output(), io.Model.Output(),

View File

@ -12,10 +12,10 @@ class InstructPixToPixConditioning(io.ComfyNode):
node_id="InstructPixToPixConditioning_V3", node_id="InstructPixToPixConditioning_V3",
category="conditioning/instructpix2pix", category="conditioning/instructpix2pix",
inputs=[ inputs=[
io.Conditioning.Input(id="positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input(id="negative"), io.Conditioning.Input("negative"),
io.Vae.Input(id="vae"), io.Vae.Input("vae"),
io.Image.Input(id="pixels"), io.Image.Input("pixels"),
], ],
outputs=[ outputs=[
io.Conditioning.Output(display_name="positive"), io.Conditioning.Output(display_name="positive"),

View File

@ -24,8 +24,8 @@ class LatentAdd(io.ComfyNode):
node_id="LatentAdd_V3", node_id="LatentAdd_V3",
category="latent/advanced", category="latent/advanced",
inputs=[ inputs=[
io.Latent.Input(id="samples1"), io.Latent.Input("samples1"),
io.Latent.Input(id="samples2"), io.Latent.Input("samples2"),
], ],
outputs=[ outputs=[
io.Latent.Output(), io.Latent.Output(),
@ -52,8 +52,8 @@ class LatentApplyOperation(io.ComfyNode):
category="latent/advanced/operations", category="latent/advanced/operations",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Latent.Input(id="samples"), io.Latent.Input("samples"),
io.LatentOperation.Input(id="operation"), io.LatentOperation.Input("operation"),
], ],
outputs=[ outputs=[
io.Latent.Output(), io.Latent.Output(),
@ -77,8 +77,8 @@ class LatentApplyOperationCFG(io.ComfyNode):
category="latent/advanced/operations", category="latent/advanced/operations",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.LatentOperation.Input(id="operation"), io.LatentOperation.Input("operation"),
], ],
outputs=[ outputs=[
io.Model.Output(), io.Model.Output(),
@ -108,8 +108,8 @@ class LatentBatch(io.ComfyNode):
node_id="LatentBatch_V3", node_id="LatentBatch_V3",
category="latent/batch", category="latent/batch",
inputs=[ inputs=[
io.Latent.Input(id="samples1"), io.Latent.Input("samples1"),
io.Latent.Input(id="samples2"), io.Latent.Input("samples2"),
], ],
outputs=[ outputs=[
io.Latent.Output(), io.Latent.Output(),
@ -137,8 +137,8 @@ class LatentBatchSeedBehavior(io.ComfyNode):
node_id="LatentBatchSeedBehavior_V3", node_id="LatentBatchSeedBehavior_V3",
category="latent/advanced", category="latent/advanced",
inputs=[ inputs=[
io.Latent.Input(id="samples"), io.Latent.Input("samples"),
io.Combo.Input(id="seed_behavior", options=["random", "fixed"], default="fixed"), io.Combo.Input("seed_behavior", options=["random", "fixed"], default="fixed"),
], ],
outputs=[ outputs=[
io.Latent.Output(), io.Latent.Output(),
@ -166,9 +166,9 @@ class LatentInterpolate(io.ComfyNode):
node_id="LatentInterpolate_V3", node_id="LatentInterpolate_V3",
category="latent/advanced", category="latent/advanced",
inputs=[ inputs=[
io.Latent.Input(id="samples1"), io.Latent.Input("samples1"),
io.Latent.Input(id="samples2"), io.Latent.Input("samples2"),
io.Float.Input(id="ratio", default=1.0, min=0.0, max=1.0, step=0.01), io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01),
], ],
outputs=[ outputs=[
io.Latent.Output(), io.Latent.Output(),
@ -205,8 +205,8 @@ class LatentMultiply(io.ComfyNode):
node_id="LatentMultiply_V3", node_id="LatentMultiply_V3",
category="latent/advanced", category="latent/advanced",
inputs=[ inputs=[
io.Latent.Input(id="samples"), io.Latent.Input("samples"),
io.Float.Input(id="multiplier", default=1.0, min=-10.0, max=10.0, step=0.01), io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
], ],
outputs=[ outputs=[
io.Latent.Output(), io.Latent.Output(),
@ -230,9 +230,9 @@ class LatentOperationSharpen(io.ComfyNode):
category="latent/advanced/operations", category="latent/advanced/operations",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Int.Input(id="sharpen_radius", default=9, min=1, max=31, step=1), io.Int.Input("sharpen_radius", default=9, min=1, max=31, step=1),
io.Float.Input(id="sigma", default=1.0, min=0.1, max=10.0, step=0.1), io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1),
io.Float.Input(id="alpha", default=0.1, min=0.0, max=5.0, step=0.01), io.Float.Input("alpha", default=0.1, min=0.0, max=5.0, step=0.01),
], ],
outputs=[ outputs=[
io.LatentOperation.Output(), io.LatentOperation.Output(),
@ -272,7 +272,7 @@ class LatentOperationTonemapReinhard(io.ComfyNode):
category="latent/advanced/operations", category="latent/advanced/operations",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Float.Input(id="multiplier", default=1.0, min=0.0, max=100.0, step=0.01), io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
], ],
outputs=[ outputs=[
io.LatentOperation.Output(), io.LatentOperation.Output(),
@ -306,8 +306,8 @@ class LatentSubtract(io.ComfyNode):
node_id="LatentSubtract_V3", node_id="LatentSubtract_V3",
category="latent/advanced", category="latent/advanced",
inputs=[ inputs=[
io.Latent.Input(id="samples1"), io.Latent.Input("samples1"),
io.Latent.Input(id="samples2"), io.Latent.Input("samples2"),
], ],
outputs=[ outputs=[
io.Latent.Output(), io.Latent.Output(),

View File

@ -35,10 +35,10 @@ class Load3D(io.ComfyNode):
category="3d", category="3d",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Combo.Input(id="model_file", options=sorted(files), upload=io.UploadType.model), io.Combo.Input("model_file", options=sorted(files), upload=io.UploadType.model),
io.Load3D.Input(id="image"), io.Load3D.Input("image"),
io.Int.Input(id="width", default=1024, min=1, max=4096, step=1), io.Int.Input("width", default=1024, min=1, max=4096, step=1),
io.Int.Input(id="height", default=1024, min=1, max=4096, step=1), io.Int.Input("height", default=1024, min=1, max=4096, step=1),
], ],
outputs=[ outputs=[
io.Image.Output(display_name="image"), io.Image.Output(display_name="image"),
@ -96,10 +96,10 @@ class Load3DAnimation(io.ComfyNode):
category="3d", category="3d",
is_experimental=True, is_experimental=True,
inputs=[ inputs=[
io.Combo.Input(id="model_file", options=sorted(files), upload=io.UploadType.model), io.Combo.Input("model_file", options=sorted(files), upload=io.UploadType.model),
io.Load3DAnimation.Input(id="image"), io.Load3DAnimation.Input("image"),
io.Int.Input(id="width", default=1024, min=1, max=4096, step=1), io.Int.Input("width", default=1024, min=1, max=4096, step=1),
io.Int.Input(id="height", default=1024, min=1, max=4096, step=1), io.Int.Input("height", default=1024, min=1, max=4096, step=1),
], ],
outputs=[ outputs=[
io.Image.Output(display_name="image"), io.Image.Output(display_name="image"),
@ -140,8 +140,8 @@ class Preview3D(io.ComfyNode):
is_experimental=True, is_experimental=True,
is_output_node=True, is_output_node=True,
inputs=[ inputs=[
io.String.Input(id="model_file", default="", multiline=False), io.String.Input("model_file", default="", multiline=False),
io.Load3DCamera.Input(id="camera_info", optional=True), io.Load3DCamera.Input("camera_info", optional=True),
], ],
outputs=[], outputs=[],
) )
@ -161,8 +161,8 @@ class Preview3DAnimation(io.ComfyNode):
is_experimental=True, is_experimental=True,
is_output_node=True, is_output_node=True,
inputs=[ inputs=[
io.String.Input(id="model_file", default="", multiline=False), io.String.Input("model_file", default="", multiline=False),
io.Load3DCamera.Input(id="camera_info", optional=True), io.Load3DCamera.Input("camera_info", optional=True),
], ],
outputs=[], outputs=[],
) )

View File

@ -91,10 +91,10 @@ class LoraSave(io.ComfyNode):
category="_for_testing", category="_for_testing",
is_output_node=True, is_output_node=True,
inputs=[ inputs=[
io.String.Input(id="filename_prefix", default="loras/ComfyUI_extracted_lora"), io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"),
io.Int.Input(id="rank", default=8, min=1, max=4096, step=1), io.Int.Input("rank", default=8, min=1, max=4096, step=1),
io.Combo.Input(id="lora_type", options=list(LORA_TYPES.keys())), io.Combo.Input("lora_type", options=list(LORA_TYPES.keys())),
io.Boolean.Input(id="bias_diff", default=True), io.Boolean.Input("bias_diff", default=True),
io.Model.Input( io.Model.Input(
id="model_diff", optional=True, tooltip="The ModelSubtract output to be converted to a lora." id="model_diff", optional=True, tooltip="The ModelSubtract output to be converted to a lora."
), ),

View File

@ -93,10 +93,10 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
node_id="EmptyLTXVLatentVideo_V3", node_id="EmptyLTXVLatentVideo_V3",
category="latent/video/ltxv", category="latent/video/ltxv",
inputs=[ inputs=[
io.Int.Input(id="width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input(id="height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input(id="length", default=97, min=1, max=nodes.MAX_RESOLUTION, step=8), io.Int.Input("length", default=97, min=1, max=nodes.MAX_RESOLUTION, step=8),
io.Int.Input(id="batch_size", default=1, min=1, max=4096), io.Int.Input("batch_size", default=1, min=1, max=4096),
], ],
outputs=[ outputs=[
io.Latent.Output(), io.Latent.Output(),
@ -122,10 +122,10 @@ class LTXVAddGuide(io.ComfyNode):
node_id="LTXVAddGuide_V3", node_id="LTXVAddGuide_V3",
category="conditioning/video_models", category="conditioning/video_models",
inputs=[ inputs=[
io.Conditioning.Input(id="positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input(id="negative"), io.Conditioning.Input("negative"),
io.Vae.Input(id="vae"), io.Vae.Input("vae"),
io.Latent.Input(id="latent"), io.Latent.Input("latent"),
io.Image.Input( io.Image.Input(
id="image", id="image",
tooltip="Image or video to condition the latent video on. Must be 8*n + 1 frames. " tooltip="Image or video to condition the latent video on. Must be 8*n + 1 frames. "
@ -141,12 +141,12 @@ class LTXVAddGuide(io.ComfyNode):
"For videos with 9+ frames, frame_idx must be divisible by 8, otherwise it will be rounded " "For videos with 9+ frames, frame_idx must be divisible by 8, otherwise it will be rounded "
"down to the nearest multiple of 8. Negative values are counted from the end of the video.", "down to the nearest multiple of 8. Negative values are counted from the end of the video.",
), ),
io.Float.Input(id="strength", default=1.0, min=0.0, max=1.0, step=0.01), io.Float.Input("strength", default=1.0, min=0.0, max=1.0, step=0.01),
], ],
outputs=[ outputs=[
io.Conditioning.Output(id="positive_out", display_name="positive"), io.Conditioning.Output(display_name="positive"),
io.Conditioning.Output(id="negative_out", display_name="negative"), io.Conditioning.Output(display_name="negative"),
io.Latent.Output(id="latent_out", display_name="latent"), io.Latent.Output(display_name="latent"),
], ],
) )
@ -282,13 +282,13 @@ class LTXVConditioning(io.ComfyNode):
node_id="LTXVConditioning_V3", node_id="LTXVConditioning_V3",
category="conditioning/video_models", category="conditioning/video_models",
inputs=[ inputs=[
io.Conditioning.Input(id="positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input(id="negative"), io.Conditioning.Input("negative"),
io.Float.Input(id="frame_rate", default=25.0, min=0.0, max=1000.0, step=0.01), io.Float.Input("frame_rate", default=25.0, min=0.0, max=1000.0, step=0.01),
], ],
outputs=[ outputs=[
io.Conditioning.Output(id="positive_out", display_name="positive"), io.Conditioning.Output(display_name="positive"),
io.Conditioning.Output(id="negative_out", display_name="negative"), io.Conditioning.Output(display_name="negative"),
], ],
) )
@ -306,14 +306,14 @@ class LTXVCropGuides(io.ComfyNode):
node_id="LTXVCropGuides_V3", node_id="LTXVCropGuides_V3",
category="conditioning/video_models", category="conditioning/video_models",
inputs=[ inputs=[
io.Conditioning.Input(id="positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input(id="negative"), io.Conditioning.Input("negative"),
io.Latent.Input(id="latent"), io.Latent.Input("latent"),
], ],
outputs=[ outputs=[
io.Conditioning.Output(id="positive_out", display_name="positive"), io.Conditioning.Output(display_name="positive"),
io.Conditioning.Output(id="negative_out", display_name="negative"), io.Conditioning.Output(display_name="negative"),
io.Latent.Output(id="latent_out", display_name="latent"), io.Latent.Output(display_name="latent"),
], ],
) )
@ -342,19 +342,19 @@ class LTXVImgToVideo(io.ComfyNode):
node_id="LTXVImgToVideo_V3", node_id="LTXVImgToVideo_V3",
category="conditioning/video_models", category="conditioning/video_models",
inputs=[ inputs=[
io.Conditioning.Input(id="positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input(id="negative"), io.Conditioning.Input("negative"),
io.Vae.Input(id="vae"), io.Vae.Input("vae"),
io.Image.Input(id="image"), io.Image.Input("image"),
io.Int.Input(id="width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("width", default=768, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input(id="height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32), io.Int.Input("height", default=512, min=64, max=nodes.MAX_RESOLUTION, step=32),
io.Int.Input(id="length", default=97, min=9, max=nodes.MAX_RESOLUTION, step=8), io.Int.Input("length", default=97, min=9, max=nodes.MAX_RESOLUTION, step=8),
io.Int.Input(id="batch_size", default=1, min=1, max=4096), io.Int.Input("batch_size", default=1, min=1, max=4096),
io.Float.Input(id="strength", default=1.0, min=0.0, max=1.0), io.Float.Input("strength", default=1.0, min=0.0, max=1.0),
], ],
outputs=[ outputs=[
io.Conditioning.Output(id="positive_out", display_name="positive"), io.Conditioning.Output(display_name="positive"),
io.Conditioning.Output(id="negative_out", display_name="negative"), io.Conditioning.Output(display_name="negative"),
io.Latent.Output(display_name="latent"), io.Latent.Output(display_name="latent"),
], ],
) )
@ -390,13 +390,13 @@ class LTXVPreprocess(io.ComfyNode):
node_id="LTXVPreprocess_V3", node_id="LTXVPreprocess_V3",
category="image", category="image",
inputs=[ inputs=[
io.Image.Input(id="image"), io.Image.Input("image"),
io.Int.Input( io.Int.Input(
id="img_compression", default=35, min=0, max=100, tooltip="Amount of compression to apply on image." id="img_compression", default=35, min=0, max=100, tooltip="Amount of compression to apply on image."
), ),
], ],
outputs=[ outputs=[
io.Image.Output(id="output_image", display_name="output_image"), io.Image.Output(display_name="output_image"),
], ],
) )
@ -415,9 +415,9 @@ class LTXVScheduler(io.ComfyNode):
node_id="LTXVScheduler_V3", node_id="LTXVScheduler_V3",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
inputs=[ inputs=[
io.Int.Input(id="steps", default=20, min=1, max=10000), io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input(id="max_shift", default=2.05, min=0.0, max=100.0, step=0.01), io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
io.Float.Input(id="base_shift", default=0.95, min=0.0, max=100.0, step=0.01), io.Float.Input("base_shift", default=0.95, min=0.0, max=100.0, step=0.01),
io.Boolean.Input( io.Boolean.Input(
id="stretch", id="stretch",
default=True, default=True,
@ -431,7 +431,7 @@ class LTXVScheduler(io.ComfyNode):
step=0.01, step=0.01,
tooltip="The terminal value of the sigmas after stretching.", tooltip="The terminal value of the sigmas after stretching.",
), ),
io.Latent.Input(id="latent", optional=True), io.Latent.Input("latent", optional=True),
], ],
outputs=[ outputs=[
io.Sigmas.Output(), io.Sigmas.Output(),
@ -478,10 +478,10 @@ class ModelSamplingLTXV(io.ComfyNode):
node_id="ModelSamplingLTXV_V3", node_id="ModelSamplingLTXV_V3",
category="advanced/model", category="advanced/model",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.Float.Input(id="max_shift", default=2.05, min=0.0, max=100.0, step=0.01), io.Float.Input("max_shift", default=2.05, min=0.0, max=100.0, step=0.01),
io.Float.Input(id="base_shift", default=0.95, min=0.0, max=100.0, step=0.01), io.Float.Input("base_shift", default=0.95, min=0.0, max=100.0, step=0.01),
io.Latent.Input(id="latent", optional=True), io.Latent.Input("latent", optional=True),
], ],
outputs=[ outputs=[
io.Model.Output(), io.Model.Output(),

View File

@ -27,9 +27,9 @@ class CLIPTextEncodeLumina2(io.ComfyNode):
description="Encodes a system prompt and a user prompt using a CLIP model into an embedding " description="Encodes a system prompt and a user prompt using a CLIP model into an embedding "
"that can be used to guide the diffusion model towards generating specific images.", "that can be used to guide the diffusion model towards generating specific images.",
inputs=[ inputs=[
io.Combo.Input(id="system_prompt", options=list(cls.SYSTEM_PROMPT.keys()), tooltip=cls.SYSTEM_PROMPT_TIP), io.Combo.Input("system_prompt", options=list(cls.SYSTEM_PROMPT.keys()), tooltip=cls.SYSTEM_PROMPT_TIP),
io.String.Input(id="user_prompt", multiline=True, dynamic_prompts=True, tooltip="The text to be encoded."), io.String.Input("user_prompt", multiline=True, dynamic_prompts=True, tooltip="The text to be encoded."),
io.Clip.Input(id="clip", tooltip="The CLIP model used for encoding the text."), io.Clip.Input("clip", tooltip="The CLIP model used for encoding the text."),
], ],
outputs=[ outputs=[
io.Conditioning.Output(tooltip="A conditioning containing the embedded text used to guide the diffusion model."), io.Conditioning.Output(tooltip="A conditioning containing the embedded text used to guide the diffusion model."),
@ -56,9 +56,9 @@ class RenormCFG(io.ComfyNode):
node_id="RenormCFG_V3", node_id="RenormCFG_V3",
category="advanced/model", category="advanced/model",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.Float.Input(id="cfg_trunc", default=100, min=0.0, max=100.0, step=0.01), io.Float.Input("cfg_trunc", default=100, min=0.0, max=100.0, step=0.01),
io.Float.Input(id="renorm_cfg", default=1.0, min=0.0, max=100.0, step=0.01), io.Float.Input("renorm_cfg", default=1.0, min=0.0, max=100.0, step=0.01),
], ],
outputs=[ outputs=[
io.Model.Output(), io.Model.Output(),

View File

@ -23,12 +23,12 @@ class ImageRGBToYUV(io.ComfyNode):
node_id="ImageRGBToYUV_V3", node_id="ImageRGBToYUV_V3",
category="image/batch", category="image/batch",
inputs=[ inputs=[
io.Image.Input(id="image"), io.Image.Input("image"),
], ],
outputs=[ outputs=[
io.Image.Output(id="Y", display_name="Y"), io.Image.Output(display_name="Y"),
io.Image.Output(id="U", display_name="U"), io.Image.Output(display_name="U"),
io.Image.Output(id="V", display_name="V"), io.Image.Output(display_name="V"),
], ],
) )
@ -45,9 +45,9 @@ class ImageYUVToRGB(io.ComfyNode):
node_id="ImageYUVToRGB_V3", node_id="ImageYUVToRGB_V3",
category="image/batch", category="image/batch",
inputs=[ inputs=[
io.Image.Input(id="Y"), io.Image.Input("Y"),
io.Image.Input(id="U"), io.Image.Input("U"),
io.Image.Input(id="V"), io.Image.Input("V"),
], ],
outputs=[ outputs=[
io.Image.Output(), io.Image.Output(),
@ -68,9 +68,9 @@ class Morphology(io.ComfyNode):
display_name="ImageMorphology _V3", display_name="ImageMorphology _V3",
category="image/postprocessing", category="image/postprocessing",
inputs=[ inputs=[
io.Image.Input(id="image"), io.Image.Input("image"),
io.Combo.Input(id="operation", options=["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"]), io.Combo.Input("operation", options=["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"]),
io.Int.Input(id="kernel_size", default=3, min=3, max=999, step=1), io.Int.Input("kernel_size", default=3, min=3, max=999, step=1),
], ],
outputs=[ outputs=[
io.Image.Output(), io.Image.Output(),

View File

@ -33,9 +33,9 @@ class OptimalStepsScheduler(io.ComfyNode):
node_id="OptimalStepsScheduler_V3", node_id="OptimalStepsScheduler_V3",
category="sampling/custom_sampling/schedulers", category="sampling/custom_sampling/schedulers",
inputs=[ inputs=[
io.Combo.Input(id="model_type", options=["FLUX", "Wan", "Chroma"]), io.Combo.Input("model_type", options=["FLUX", "Wan", "Chroma"]),
io.Int.Input(id="steps", default=20, min=3, max=1000), io.Int.Input("steps", default=20, min=3, max=1000),
io.Float.Input(id="denoise", default=1.0, min=0.0, max=1.0, step=0.01), io.Float.Input("denoise", default=1.0, min=0.0, max=1.0, step=0.01),
], ],
outputs=[ outputs=[
io.Sigmas.Output(), io.Sigmas.Output(),

View File

@ -17,8 +17,8 @@ class PerturbedAttentionGuidance(io.ComfyNode):
node_id="PerturbedAttentionGuidance_V3", node_id="PerturbedAttentionGuidance_V3",
category="model_patches/unet", category="model_patches/unet",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.Float.Input(id="scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01), io.Float.Input("scale", default=3.0, min=0.0, max=100.0, step=0.01, round=0.01),
], ],
outputs=[ outputs=[
io.Model.Output(), io.Model.Output(),

View File

@ -88,12 +88,12 @@ class PerpNegGuider(io.ComfyNode):
node_id="PerpNegGuider_V3", node_id="PerpNegGuider_V3",
category="_for_testing", category="_for_testing",
inputs=[ inputs=[
io.Model.Input(id="model"), io.Model.Input("model"),
io.Conditioning.Input(id="positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input(id="negative"), io.Conditioning.Input("negative"),
io.Conditioning.Input(id="empty_conditioning"), io.Conditioning.Input("empty_conditioning"),
io.Float.Input(id="cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01), io.Float.Input("cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01),
io.Float.Input(id="neg_scale", default=1.0, min=0.0, max=100.0, step=0.01), io.Float.Input("neg_scale", default=1.0, min=0.0, max=100.0, step=0.01),
], ],
outputs=[ outputs=[
io.Guider.Output(), io.Guider.Output(),