from inspect import cleandoc
from comfy.comfy_types.node_typing import IO, ComfyNodeABC
from comfy_api.input_impl.video_types import VideoFromFile
from comfy_api_nodes.apis.luma_api import (
    LumaImageModel,
    LumaVideoModel,
    LumaVideoOutputResolution,
    LumaVideoModelOutputDuration,
    LumaAspectRatio,
    LumaState,
    LumaImageGenerationRequest,
    LumaGenerationRequest,
    LumaGeneration,
    LumaCharacterRef,
    LumaModifyImageRef,
    LumaImageIdentity,
    LumaReference,
    LumaReferenceChain,
    LumaImageReference,
    LumaKeyframes,
    LumaConceptChain,
    LumaIO,
    get_luma_concepts,
)
from comfy_api_nodes.apis.client import (
    ApiEndpoint,
    HttpMethod,
    SynchronousOperation,
    PollingOperation,
    EmptyRequest,
)
from comfy_api_nodes.apinode_utils import (
    upload_images_to_comfyapi,
    process_image_response,
    validate_string,
)

import requests
import torch
from io import BytesIO
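
# The four generation nodes below follow the same two-step flow against the
# Comfy API proxy: a SynchronousOperation submits the generation request, then
# a PollingOperation polls the returned generation id until it reaches a
# terminal LumaState. A minimal sketch of that flow, mirroring the calls made
# in the nodes below (a valid Comfy.org auth_token is assumed):
#
#   submit = SynchronousOperation(
#       endpoint=ApiEndpoint(
#           path="/proxy/luma/generations",
#           method=HttpMethod.POST,
#           request_model=LumaGenerationRequest,
#           response_model=LumaGeneration,
#       ),
#       request=LumaGenerationRequest(prompt="a calm sea at dawn", model=model),
#       auth_token=auth_token,
#   )
#   generation = submit.execute()
#
#   poll = PollingOperation(
#       poll_endpoint=ApiEndpoint(
#           path=f"/proxy/luma/generations/{generation.id}",
#           method=HttpMethod.GET,
#           request_model=EmptyRequest,
#           response_model=LumaGeneration,
#       ),
#       completed_statuses=[LumaState.completed],
#       failed_statuses=[LumaState.failed],
#       status_extractor=lambda g: g.state,
#       auth_token=auth_token,
#   )
#   finished = poll.execute()  # finished.assets holds the result URLs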


class LumaReferenceNode(ComfyNodeABC):
    """
    Holds an image and weight for use with Luma Generate Image node.
    """

    RETURN_TYPES = (LumaIO.LUMA_REF,)
    RETURN_NAMES = ("luma_ref",)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "create_luma_reference"
    CATEGORY = "api node/image/Luma"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": (
                    IO.IMAGE,
                    {
                        "tooltip": "Image to use as reference.",
                    },
                ),
                "weight": (
                    IO.FLOAT,
                    {
                        "default": 1.0,
                        "min": 0.0,
                        "max": 1.0,
                        "step": 0.01,
                        "tooltip": "Weight of image reference.",
                    },
                ),
            },
            "optional": {"luma_ref": (LumaIO.LUMA_REF,)},
        }

    def create_luma_reference(
        self, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None
    ):
        if luma_ref is not None:
            luma_ref = luma_ref.clone()
        else:
            luma_ref = LumaReferenceChain()
        luma_ref.add(LumaReference(image=image, weight=round(weight, 2)))
        return (luma_ref,)
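
# Example: references accumulate into a LumaReferenceChain, so chaining two
# LumaReferenceNode instances produces two weighted references. A minimal
# sketch (img_a and img_b are assumed to be valid IO.IMAGE tensors):
#
#   node = LumaReferenceNode()
#   (chain,) = node.create_luma_reference(image=img_a, weight=0.8)
#   (chain,) = node.create_luma_reference(image=img_b, weight=0.5, luma_ref=chain)
#   # chain now holds both references; LumaImageGenerationNode reads up to 4.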


class LumaConceptsNode(ComfyNodeABC):
    """
    Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.
    """

    RETURN_TYPES = (LumaIO.LUMA_CONCEPTS,)
    RETURN_NAMES = ("luma_concepts",)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "create_concepts"
    CATEGORY = "api node/video/Luma"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "concept1": (get_luma_concepts(include_none=True),),
                "concept2": (get_luma_concepts(include_none=True),),
                "concept3": (get_luma_concepts(include_none=True),),
                "concept4": (get_luma_concepts(include_none=True),),
            },
            "optional": {
                "luma_concepts": (
                    LumaIO.LUMA_CONCEPTS,
                    {
                        "tooltip": "Optional Camera Concepts to add to the ones chosen here."
                    },
                ),
            },
        }

    def create_concepts(
        self,
        concept1: str,
        concept2: str,
        concept3: str,
        concept4: str,
        luma_concepts: LumaConceptChain = None,
    ):
        chain = LumaConceptChain(str_list=[concept1, concept2, concept3, concept4])
        if luma_concepts is not None:
            chain = luma_concepts.clone_and_merge(chain)
        return (chain,)


class LumaImageGenerationNode(ComfyNodeABC):
    """
    Generates images synchronously based on prompt and aspect ratio.
    """

    RETURN_TYPES = (IO.IMAGE,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/image/Luma"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the image generation",
                    },
                ),
                "model": ([model.value for model in LumaImageModel],),
                "aspect_ratio": (
                    [ratio.value for ratio in LumaAspectRatio],
                    {
                        "default": LumaAspectRatio.ratio_16_9,
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
                    },
                ),
                "style_image_weight": (
                    IO.FLOAT,
                    {
                        "default": 1.0,
                        "min": 0.0,
                        "max": 1.0,
                        "step": 0.01,
                        "tooltip": "Weight of style image. Ignored if no style_image provided.",
                    },
                ),
            },
            "optional": {
                "image_luma_ref": (
                    LumaIO.LUMA_REF,
                    {
                        "tooltip": "Luma Reference node connection to influence generation with input images; up to 4 images can be considered."
                    },
                ),
                "style_image": (
                    IO.IMAGE,
                    {"tooltip": "Style reference image; only 1 image will be used."},
                ),
                "character_image": (
                    IO.IMAGE,
                    {
                        "tooltip": "Character reference images; can be a batch of multiple, up to 4 images can be considered."
                    },
                ),
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    def api_call(
        self,
        prompt: str,
        model: str,
        aspect_ratio: str,
        seed,
        style_image_weight: float,
        image_luma_ref: LumaReferenceChain = None,
        style_image: torch.Tensor = None,
        character_image: torch.Tensor = None,
        auth_token=None,
        **kwargs,
    ):
        validate_string(prompt, strip_whitespace=True, min_length=3)
        # handle image_luma_ref
        api_image_ref = None
        if image_luma_ref is not None:
            api_image_ref = self._convert_luma_refs(
                image_luma_ref, max_refs=4, auth_token=auth_token
            )
        # handle style_luma_ref
        api_style_ref = None
        if style_image is not None:
            api_style_ref = self._convert_style_image(
                style_image, weight=style_image_weight, auth_token=auth_token
            )
        # handle character_ref images
        character_ref = None
        if character_image is not None:
            download_urls = upload_images_to_comfyapi(
                character_image, max_images=4, auth_token=auth_token
            )
            character_ref = LumaCharacterRef(
                identity0=LumaImageIdentity(images=download_urls)
            )

        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/luma/generations/image",
                method=HttpMethod.POST,
                request_model=LumaImageGenerationRequest,
                response_model=LumaGeneration,
            ),
            request=LumaImageGenerationRequest(
                prompt=prompt,
                model=model,
                aspect_ratio=aspect_ratio,
                image_ref=api_image_ref,
                style_ref=api_style_ref,
                character_ref=character_ref,
            ),
            auth_token=auth_token,
        )
        response_api: LumaGeneration = operation.execute()

        operation = PollingOperation(
            poll_endpoint=ApiEndpoint(
                path=f"/proxy/luma/generations/{response_api.id}",
                method=HttpMethod.GET,
                request_model=EmptyRequest,
                response_model=LumaGeneration,
            ),
            completed_statuses=[LumaState.completed],
            failed_statuses=[LumaState.failed],
            status_extractor=lambda x: x.state,
            auth_token=auth_token,
        )
        response_poll = operation.execute()

        img_response = requests.get(response_poll.assets.image)
        img = process_image_response(img_response)
        return (img,)

    def _convert_luma_refs(
        self, luma_ref: LumaReferenceChain, max_refs: int, auth_token=None
    ):
        # Upload each chained reference image and collect its download URL,
        # stopping once max_refs images have been uploaded.
        luma_urls = []
        ref_count = 0
        for ref in luma_ref.refs:
            download_urls = upload_images_to_comfyapi(
                ref.image, max_images=1, auth_token=auth_token
            )
            luma_urls.append(download_urls[0])
            ref_count += 1
            if ref_count >= max_refs:
                break
        return luma_ref.create_api_model(download_urls=luma_urls, max_refs=max_refs)

    def _convert_style_image(
        self, style_image: torch.Tensor, weight: float, auth_token=None
    ):
        # A style image is just a single-entry reference chain.
        chain = LumaReferenceChain(
            first_ref=LumaReference(image=style_image, weight=weight)
        )
        return self._convert_luma_refs(chain, max_refs=1, auth_token=auth_token)


class LumaImageModifyNode(ComfyNodeABC):
    """
    Modifies images synchronously based on prompt and image weight.
    """

    RETURN_TYPES = (IO.IMAGE,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/image/Luma"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": (IO.IMAGE,),
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the image generation",
                    },
                ),
                "image_weight": (
                    IO.FLOAT,
                    {
                        "default": 0.1,
                        "min": 0.0,
                        "max": 0.98,
                        "step": 0.01,
                        "tooltip": "Weight of the image; the closer to 1.0, the less the image will be modified.",
                    },
                ),
                "model": ([model.value for model in LumaImageModel],),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
                    },
                ),
            },
            "optional": {},
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    def api_call(
        self,
        prompt: str,
        model: str,
        image: torch.Tensor,
        image_weight: float,
        seed,
        auth_token=None,
        **kwargs,
    ):
        # first, upload image
        download_urls = upload_images_to_comfyapi(
            image, max_images=1, auth_token=auth_token
        )
        image_url = download_urls[0]
        # next, make Luma call with download url provided
        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/luma/generations/image",
                method=HttpMethod.POST,
                request_model=LumaImageGenerationRequest,
                response_model=LumaGeneration,
            ),
            request=LumaImageGenerationRequest(
                prompt=prompt,
                model=model,
                modify_image_ref=LumaModifyImageRef(
                    # invert the UI weight (where closer to 1.0 keeps more of
                    # the input image) before sending it to the API, and clamp
                    # the result to [0.0, 0.98]
                    url=image_url,
                    weight=round(max(min(1.0 - image_weight, 0.98), 0.0), 2),
                ),
            ),
            auth_token=auth_token,
        )
        response_api: LumaGeneration = operation.execute()

        operation = PollingOperation(
            poll_endpoint=ApiEndpoint(
                path=f"/proxy/luma/generations/{response_api.id}",
                method=HttpMethod.GET,
                request_model=EmptyRequest,
                response_model=LumaGeneration,
            ),
            completed_statuses=[LumaState.completed],
            failed_statuses=[LumaState.failed],
            status_extractor=lambda x: x.state,
            auth_token=auth_token,
        )
        response_poll = operation.execute()

        img_response = requests.get(response_poll.assets.image)
        img = process_image_response(img_response)
        return (img,)


class LumaTextToVideoGenerationNode(ComfyNodeABC):
    """
    Generates videos synchronously based on prompt and output resolution and duration.
    """

    RETURN_TYPES = (IO.VIDEO,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/video/Luma"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the video generation",
                    },
                ),
                "model": ([model.value for model in LumaVideoModel],),
                "aspect_ratio": (
                    [ratio.value for ratio in LumaAspectRatio],
                    {
                        "default": LumaAspectRatio.ratio_16_9,
                    },
                ),
                "resolution": (
                    [resolution.value for resolution in LumaVideoOutputResolution],
                    {
                        "default": LumaVideoOutputResolution.res_540p,
                    },
                ),
                "duration": ([dur.value for dur in LumaVideoModelOutputDuration],),
                "loop": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
                    },
                ),
            },
            "optional": {
                "luma_concepts": (
                    LumaIO.LUMA_CONCEPTS,
                    {
                        "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
                    },
                ),
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    def api_call(
        self,
        prompt: str,
        model: str,
        aspect_ratio: str,
        resolution: str,
        duration: str,
        loop: bool,
        seed,
        luma_concepts: LumaConceptChain = None,
        auth_token=None,
        **kwargs,
    ):
        validate_string(prompt, strip_whitespace=False, min_length=3)
        # omit duration and resolution for ray-1-6, which does not use them
        duration = duration if model != LumaVideoModel.ray_1_6 else None
        resolution = resolution if model != LumaVideoModel.ray_1_6 else None

        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/luma/generations",
                method=HttpMethod.POST,
                request_model=LumaGenerationRequest,
                response_model=LumaGeneration,
            ),
            request=LumaGenerationRequest(
                prompt=prompt,
                model=model,
                resolution=resolution,
                aspect_ratio=aspect_ratio,
                duration=duration,
                loop=loop,
                concepts=luma_concepts.create_api_model() if luma_concepts else None,
            ),
            auth_token=auth_token,
        )
        response_api: LumaGeneration = operation.execute()

        operation = PollingOperation(
            poll_endpoint=ApiEndpoint(
                path=f"/proxy/luma/generations/{response_api.id}",
                method=HttpMethod.GET,
                request_model=EmptyRequest,
                response_model=LumaGeneration,
            ),
            completed_statuses=[LumaState.completed],
            failed_statuses=[LumaState.failed],
            status_extractor=lambda x: x.state,
            auth_token=auth_token,
        )
        response_poll = operation.execute()

        vid_response = requests.get(response_poll.assets.video)
        return (VideoFromFile(BytesIO(vid_response.content)),)
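
# Example: camera motion comes from wiring a LumaConceptsNode output into the
# luma_concepts input of the video nodes. A minimal sketch (the concept names
# are placeholders; valid values come from get_luma_concepts()):
#
#   (concepts,) = LumaConceptsNode().create_concepts(
#       concept1="<concept>", concept2="<concept>",
#       concept3="<concept>", concept4="<concept>",
#   )
#   (video,) = LumaTextToVideoGenerationNode().api_call(
#       prompt="slow dolly shot through a misty forest",
#       model=[m.value for m in LumaVideoModel][0],
#       aspect_ratio=LumaAspectRatio.ratio_16_9,
#       resolution=LumaVideoOutputResolution.res_540p,
#       duration=[d.value for d in LumaVideoModelOutputDuration][0],
#       loop=False,
#       seed=0,
#       luma_concepts=concepts,
#       auth_token="<comfy-org token>",
#   )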


class LumaImageToVideoGenerationNode(ComfyNodeABC):
    """
    Generates videos synchronously based on prompt, input images, and output resolution and duration.
    """

    RETURN_TYPES = (IO.VIDEO,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/video/Luma"

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the video generation",
                    },
                ),
                "model": ([model.value for model in LumaVideoModel],),
                # "aspect_ratio": ([ratio.value for ratio in LumaAspectRatio], {
                #     "default": LumaAspectRatio.ratio_16_9,
                # }),
                "resolution": (
                    [resolution.value for resolution in LumaVideoOutputResolution],
                    {
                        "default": LumaVideoOutputResolution.res_540p,
                    },
                ),
                "duration": ([dur.value for dur in LumaVideoModelOutputDuration],),
                "loop": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
                    },
                ),
            },
            "optional": {
                "first_image": (
                    IO.IMAGE,
                    {"tooltip": "First frame of generated video."},
                ),
                "last_image": (IO.IMAGE, {"tooltip": "Last frame of generated video."}),
                "luma_concepts": (
                    LumaIO.LUMA_CONCEPTS,
                    {
                        "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
                    },
                ),
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    def api_call(
        self,
        prompt: str,
        model: str,
        resolution: str,
        duration: str,
        loop: bool,
        seed,
        first_image: torch.Tensor = None,
        last_image: torch.Tensor = None,
        luma_concepts: LumaConceptChain = None,
        auth_token=None,
        **kwargs,
    ):
        if first_image is None and last_image is None:
            raise Exception(
                "At least one of first_image and last_image must be provided."
            )
        keyframes = self._convert_to_keyframes(first_image, last_image, auth_token)
        # omit duration and resolution for ray-1-6, which does not use them
        duration = duration if model != LumaVideoModel.ray_1_6 else None
        resolution = resolution if model != LumaVideoModel.ray_1_6 else None

        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/luma/generations",
                method=HttpMethod.POST,
                request_model=LumaGenerationRequest,
                response_model=LumaGeneration,
            ),
            request=LumaGenerationRequest(
                prompt=prompt,
                model=model,
                aspect_ratio=LumaAspectRatio.ratio_16_9,  # ignored, but still needed by the API for some reason
                resolution=resolution,
                duration=duration,
                loop=loop,
                keyframes=keyframes,
                concepts=luma_concepts.create_api_model() if luma_concepts else None,
            ),
            auth_token=auth_token,
        )
        response_api: LumaGeneration = operation.execute()

        operation = PollingOperation(
            poll_endpoint=ApiEndpoint(
                path=f"/proxy/luma/generations/{response_api.id}",
                method=HttpMethod.GET,
                request_model=EmptyRequest,
                response_model=LumaGeneration,
            ),
            completed_statuses=[LumaState.completed],
            failed_statuses=[LumaState.failed],
            status_extractor=lambda x: x.state,
            auth_token=auth_token,
        )
        response_poll = operation.execute()

        vid_response = requests.get(response_poll.assets.video)
        return (VideoFromFile(BytesIO(vid_response.content)),)

    def _convert_to_keyframes(
        self,
        first_image: torch.Tensor = None,
        last_image: torch.Tensor = None,
        auth_token=None,
    ):
        if first_image is None and last_image is None:
            return None
        # frame0 anchors the start of the video, frame1 the end
        frame0 = None
        frame1 = None
        if first_image is not None:
            download_urls = upload_images_to_comfyapi(
                first_image, max_images=1, auth_token=auth_token
            )
            frame0 = LumaImageReference(type="image", url=download_urls[0])
        if last_image is not None:
            download_urls = upload_images_to_comfyapi(
                last_image, max_images=1, auth_token=auth_token
            )
            frame1 = LumaImageReference(type="image", url=download_urls[0])
        return LumaKeyframes(frame0=frame0, frame1=frame1)


# A dictionary that contains all nodes you want to export with their names
# NOTE: names should be globally unique
NODE_CLASS_MAPPINGS = {
    "LumaImageNode": LumaImageGenerationNode,
    "LumaImageModifyNode": LumaImageModifyNode,
    "LumaVideoNode": LumaTextToVideoGenerationNode,
    "LumaImageToVideoNode": LumaImageToVideoGenerationNode,
    "LumaReferenceNode": LumaReferenceNode,
    "LumaConceptsNode": LumaConceptsNode,
}

# A dictionary that contains the friendly/human-readable titles for the nodes
NODE_DISPLAY_NAME_MAPPINGS = {
    "LumaImageNode": "Luma Text to Image",
    "LumaImageModifyNode": "Luma Image to Image",
    "LumaVideoNode": "Luma Text to Video",
    "LumaImageToVideoNode": "Luma Image to Video",
    "LumaReferenceNode": "Luma Reference",
    "LumaConceptsNode": "Luma Concepts",
}
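
# Example: outside the graph executor, a node can be driven directly for quick
# testing. A hedged sketch (network access and a valid Comfy.org token are
# assumed; in normal operation auth_token is injected through the hidden input):
#
#   node = LumaImageGenerationNode()
#   (image,) = node.api_call(
#       prompt="a lighthouse at dusk, volumetric fog",
#       model=[m.value for m in LumaImageModel][0],
#       aspect_ratio=LumaAspectRatio.ratio_16_9,
#       seed=0,
#       style_image_weight=1.0,
#       auth_token="<comfy-org token>",
#   )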