diff --git a/comfy_api/v3/io.py b/comfy_api/v3/io.py
index eac50330c..8903b3d78 100644
--- a/comfy_api/v3/io.py
+++ b/comfy_api/v3/io.py
@@ -390,19 +390,19 @@ class String(ComfyTypeIO):
     class Input(WidgetInputV3):
         '''String input.'''
         def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None,
-                     multiline=False, placeholder: str=None, default: str=None, dynamicPrompts: bool=None,
+                     multiline=False, placeholder: str=None, default: str=None, dynamic_prompts: bool=None,
                      socketless: bool=None, force_input: bool=None):
             super().__init__(id, display_name, optional, tooltip, lazy, default, socketless, None, force_input)
             self.multiline = multiline
             self.placeholder = placeholder
-            self.dynamicPrompts = dynamicPrompts
+            self.dynamic_prompts = dynamic_prompts
             self.default: str

         def as_dict_V1(self):
             return super().as_dict_V1() | prune_dict({
                 "multiline": self.multiline,
                 "placeholder": self.placeholder,
-                "dynamicPrompts": self.dynamicPrompts,
+                "dynamicPrompts": self.dynamic_prompts,
             })

 @comfytype(io_type="COMBO")
diff --git a/comfy_extras/v3/nodes_ace.py b/comfy_extras/v3/nodes_ace.py
new file mode 100644
index 000000000..e8a75f6dc
--- /dev/null
+++ b/comfy_extras/v3/nodes_ace.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+import torch
+
+import comfy.model_management
+import node_helpers
+from comfy_api.v3 import io
+
+
+class TextEncodeAceStepAudio(io.ComfyNodeV3):
+    @classmethod
+    def define_schema(cls):
+        return io.SchemaV3(
+            node_id="TextEncodeAceStepAudio_V3",
+            category="conditioning",
+            inputs=[
+                io.Clip.Input("clip"),
+                io.String.Input("tags", multiline=True, dynamic_prompts=True),
+                io.String.Input("lyrics", multiline=True, dynamic_prompts=True),
+                io.Float.Input("lyrics_strength", default=1.0, min=0.0, max=10.0, step=0.01),
+            ],
+            outputs=[io.Conditioning.Output()],
+        )
+
+    @classmethod
+    def execute(cls, clip, tags, lyrics, lyrics_strength) -> io.NodeOutput:
+        conditioning = clip.encode_from_tokens_scheduled(clip.tokenize(tags, lyrics=lyrics))
+        conditioning = node_helpers.conditioning_set_values(conditioning, {"lyrics_strength": lyrics_strength})
+        return io.NodeOutput(conditioning)
+
+
+class EmptyAceStepLatentAudio(io.ComfyNodeV3):
+    @classmethod
+    def define_schema(cls):
+        return io.SchemaV3(
+            node_id="EmptyAceStepLatentAudio_V3",
+            category="latent/audio",
+            inputs=[
+                io.Float.Input("seconds", default=120.0, min=1.0, max=1000.0, step=0.1),
+                io.Int.Input(
+                    "batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."
+                ),
+            ],
+            outputs=[io.Latent.Output()],
+        )
+
+    @classmethod
+    def execute(cls, seconds, batch_size) -> io.NodeOutput:
+        length = int(seconds * 44100 / 512 / 8)
+        latent = torch.zeros([batch_size, 8, 16, length], device=comfy.model_management.intermediate_device())
+        return io.NodeOutput({"samples": latent, "type": "audio"})
+
+
+NODES_LIST: list[type[io.ComfyNodeV3]] = [
+    TextEncodeAceStepAudio,
+    EmptyAceStepLatentAudio,
+]
diff --git a/nodes.py b/nodes.py
index 51d423053..ce8222ebe 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2299,14 +2299,15 @@ def init_builtin_extra_nodes():
         "nodes_tcfg.py",
         "nodes_v3_test.py",
         "nodes_v1_test.py",
+        "v3/nodes_ace.py",
         "v3/nodes_audio.py",
         "v3/nodes_controlnet.py",
         "v3/nodes_images.py",
         "v3/nodes_mask.py",
         "v3/nodes_preview_any.py",
         "v3/nodes_primitive.py",
-        "v3/nodes_webcam.py",
         "v3/nodes_stable_cascade.py",
+        "v3/nodes_webcam.py",
     ]

     import_failed = []
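
Note: as a quick sanity check on the shape math in the new EmptyAceStepLatentAudio node above, the standalone sketch below reproduces the length calculation from its execute() method. The helper name is hypothetical and it allocates on CPU instead of comfy.model_management.intermediate_device(); only the constants (44100 Hz, /512, /8, 8x16 latent channels) come from the diff.

    import torch

    def empty_ace_step_latent(seconds: float = 120.0, batch_size: int = 1) -> dict:
        # Same shape math as EmptyAceStepLatentAudio_V3: 44100 Hz audio, compressed by 512 and then by 8.
        length = int(seconds * 44100 / 512 / 8)
        return {"samples": torch.zeros([batch_size, 8, 16, length]), "type": "audio"}

    print(empty_ace_step_latent()["samples"].shape)  # torch.Size([1, 8, 16, 1291]) for the 120 s default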