Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-09-10 19:46:38 +00:00)
More API Nodes (#7956)
* Add Ideogram generate node.
* Add staging api.
* Add API_NODE and common error for missing auth token (#5)
* Add Minimax Video Generation + Async Task queue polling example (#6)
* [Minimax] Show video preview and embed workflow in output (#7)
* Remove uv.lock
* Remove polling operations.
* Revert "Remove polling operations."
* Update stubs.
* Added Ideogram and Minimax back in.
* Added initial BFL Flux 1.1 [pro] Ultra node (#11)
* Add --comfy-api-base launch arg (#13)
* Add instructions for staging development. (#14)
* remove validation to make it easier to run against LAN copies of the API
* Manually add BFL polling status response schema (#15)
* Add function for uploading files. (#18)
* Add Luma nodes (#16)
* Refactor util functions (#20)
* Add VIDEO type (#21)
* Add rest of Luma node functionality (#19)
* Fix image_luma_ref not working (#28)
* [Bug] Remove duplicated option T2V-01 in MinimaxTextToVideoNode (#31)
* Add utils to map from pydantic model fields to comfy node inputs (#30)
* add veo2, bump av req (#32)
* Add Recraft nodes (#29)
* Add Kling Nodes (#12)
* Add Camera Concepts (luma_concepts) to Luma Video nodes (#33)
* Add Runway nodes (#17)
* Convert Minimax node to use VIDEO output type (#34)
* Standard `CATEGORY` system for api nodes (#35)
* Set `Content-Type` header when uploading files (#36)
* add better error propagation to veo2 (#37)
* Add Realistic Image and Logo Raster styles for Recraft v3 (#38)
* Fix runway image upload and progress polling (#39)
* Fix image upload for Luma: only include `Content-Type` header field if it's set explicitly (#40)
* Moved Luma nodes to nodes_luma.py (#47)
* Moved Recraft nodes to nodes_recraft.py (#48)
* Add Pixverse nodes (#46)
* Move and fix BFL nodes to node_bfl.py (#49)
* Move and edit Minimax node to nodes_minimax.py (#50)
* Add Minimax Image to Video node + Cleanup (#51)
* Add Recraft Text to Vector node, add Save SVG node to handle its output (#53)
* Added pixverse_template support to Pixverse Text to Video node (#54)
* Added Recraft Controls + Recraft Color RGB nodes (#57)
* split remaining nodes out of nodes_api, make utility lib, refactor ideogram (#61)
* Add types and docstrings to utils file (#64)
* Fix: `PollingOperation` progress bar update progress by absolute value (#65)
* Use common download function in kling nodes module (#67)
* Fix: Luma video nodes in `api nodes/image` category (#68)
* Set request type explicitly (#66)
* Add `control_after_generate` to all seed inputs (#69)
* Fix bug: deleting `Content-Type` when property does not exist (#73)
* Add preview to Save SVG node (#74)
* change default poll interval (#76), rework veo2
* Add Pixverse and updated Kling types (#75)
* Added Pixverse Image to Video node (#77)
* Add Pixverse Transition Video node (#79)
* Proper ray-1-6 support as fix has been applied in backend (#80)
* Added Recraft Style - Infinite Style Library node (#82)
* add ideogram v3 (#83)
* [Kling] Split Camera Control config to its own node (#81)
* Add Pika i2v and t2v nodes (#52)
* Temporary Fix for Runway (#87)
* Added Stability Stable Image Ultra node (#86)
* Remove Runway nodes (#88)
* Fix: Prompt text can't be validated in Kling nodes when using primitive nodes (#90)
* Fix: typo in node name "Stabiliy" => "Stability" (#91)
* Add String (Multiline) node (#93)
* Update Pika Duration and Resolution options (#94)
* Change base branch to master. Not main. (#95)
* Fix UploadRequest file_name param (#98)
* Removed Infinite Style Library until later (#99)
* fix ideogram style types (#100)
* fix multi image return (#101)
* add metadata saving to SVG (#102)
* Bump templates version to include API node template workflows (#104)
* Fix: `download_url_to_video_output` return type (#103)
* fix 4o generation bug (#106)
* Serve SVG files directly (#107)
* Add a bunch of nodes, 3 ready to use, the rest waiting for endpoint support (#108)
* Revert "Serve SVG files directly" (#111)
* Expose 4 remaining Recraft nodes (#112)
* [Kling] Add `Duration` and `Video ID` outputs (#105)
* Fix: datamodel-codegen sets string#binary type to non-existent `bytes_aliased` variable (#114)
* Fix: Dall-e 2 not setting request content-type dynamically (#113)
* Default request timeout: one hour. (#116)
* Add Kling nodes: camera control, start-end frame, lip-sync, video extend (#115)
* Add 8 nodes - 4 BFL, 4 Stability (#117)
* Fix error for Recraft ImageToImage error for nonexistent random_seed param (#118)
* Add remaining Pika nodes (#119)
* Make controls input work for Recraft Image to Image node (#120)
* Use upstream PR: Support saving Comfy VIDEO type to buffer (#123)
* Use Upstream PR: "Fix: Error creating video when sliced audio tensor chunks are non-c-contiguous" (#127)
* Improve audio upload utils (#128)
* Fix: Nested `AnyUrl` in request model cannot be serialized (Kling, Runway) (#129)
* Show errors and API output URLs to the user (change log levels) (#131)
* Fix: Luma I2I fails when weight is <=0.01 (#132)
* Change category of `LumaConcepts` node from image to video (#133)
* Fix: `image.shape` accessed before `image` is null-checked (#134)
* Apply small fixes and most prompt validation (if needed to avoid API error) (#135)
* Node name/category modifications (#140)
* Add back Recraft Style - Infinite Style Library node (#141)
* Fixed Kling: Check attributes of pydantic types. (#144)
* Bump `comfyui-workflow-templates` version (#142)
* [Kling] Print response data when error validating response (#146)
* Fix: error validating Kling image response, trying to use `"key" in` on Pydantic class instance (#147)
* [Kling] Fix: Correct/verify supported subset of input combos in Kling nodes (#149)
* [Kling] Fix typo in node description (#150)
* [Kling] Fix: CFG min/max not being enforced (#151)
* Rebase launch-rebase (private) on prep-branch (public copy of master) (#153)
* Bump templates version (#154)
* Fix: Kling image gen nodes don't return entire batch when `n` > 1 (#152)
* Remove pixverse_template from PixVerse Transition Video node (#155)
* Invert image_weight value on Luma Image to Image node (#156)
* Invert and resize mask for Ideogram V3 node to match masking conventions (#158)
* [Kling] Fix: image generation nodes not returning Tuple (#159)
* [Bug] [Kling] Fix Kling camera control (#161)
* Kling Image Gen v2 + improve node descriptions for Flux/OpenAI (#160)
* [Kling] Don't return video_id from dual effect video (#162)
* Bump frontend to 1.18.8 (#163)
* Use 3.9 compat syntax (#164)
* Use Python 3.10
* add example env var
* Update templates to 0.1.11
* Bump frontend to 1.18.9

---------

Co-authored-by: Robin Huang <robin.j.huang@gmail.com>
Co-authored-by: Christian Byrne <cbyrne@comfy.org>
Co-authored-by: thot experiment <94414189+thot-experiment@users.noreply.github.com>
@@ -1,6 +1,6 @@
# generated by datamodel-codegen:
-#   filename: https://api.comfy.org/openapi
-#   timestamp: 2025-04-23T15:56:33+00:00
+#   filename: filtered-openapi.yaml
+#   timestamp: 2025-04-29T23:44:54+00:00

from __future__ import annotations

@@ -1,12 +1,12 @@
# generated by datamodel-codegen:
-#   filename: https://api.comfy.org/openapi
-#   timestamp: 2025-04-23T15:56:33+00:00
+#   filename: filtered-openapi.yaml
+#   timestamp: 2025-04-29T23:44:54+00:00

from __future__ import annotations

from typing import Optional

-from pydantic import BaseModel, Field, constr
+from pydantic import BaseModel, Field


class V2OpenAPII2VResp(BaseModel):
@@ -30,10 +30,10 @@ class V2OpenAPIT2VReq(BaseModel):
        description='Motion mode (normal, fast, --fast only available when duration=5; --quality=1080p does not support fast)',
        examples=['normal'],
    )
-    negative_prompt: Optional[constr(max_length=2048)] = Field(
-        None, description='Negative prompt\n'
-    )
-    prompt: constr(max_length=2048) = Field(..., description='Prompt')
+    negative_prompt: Optional[str] = Field(
+        None, description='Negative prompt\n', max_length=2048
+    )
+    prompt: str = Field(..., description='Prompt', max_length=2048)
    quality: str = Field(
        ...,
        description='Video quality ("360p"(Turbo model), "540p", "720p", "1080p")',
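Note (illustration, not part of the diff): the hunk above swaps pydantic's constr() constraint type for a plain str with max_length passed to Field(); both declarations enforce the same 2048-character limit. A minimal standalone sketch of the equivalence:

from pydantic import BaseModel, Field, constr

class Old(BaseModel):
    prompt: constr(max_length=2048)  # constraint expressed as a type

class New(BaseModel):
    prompt: str = Field(..., max_length=2048)  # same limit, plain annotation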
(File diff suppressed because it is too large.)

comfy_api_nodes/apis/bfl_api.py (new file, 156 lines)
@@ -0,0 +1,156 @@
from __future__ import annotations

from enum import Enum
from typing import Any, Dict, Optional

from pydantic import BaseModel, Field, confloat, conint


class BFLOutputFormat(str, Enum):
    png = 'png'
    jpeg = 'jpeg'


class BFLFluxExpandImageRequest(BaseModel):
    prompt: str = Field(..., description='The description of the changes you want to make. This text guides the expansion process, allowing you to specify features, styles, or modifications for the expanded areas.')
    prompt_upsampling: Optional[bool] = Field(
        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
    )
    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
    top: conint(ge=0, le=2048) = Field(..., description='Number of pixels to expand at the top of the image')
    bottom: conint(ge=0, le=2048) = Field(..., description='Number of pixels to expand at the bottom of the image')
    left: conint(ge=0, le=2048) = Field(..., description='Number of pixels to expand at the left side of the image')
    right: conint(ge=0, le=2048) = Field(..., description='Number of pixels to expand at the right side of the image')
    steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
    guidance: confloat(ge=1.5, le=100) = Field(..., description='Guidance strength for the image generation process')
    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.'
    )
    output_format: Optional[BFLOutputFormat] = Field(
        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
    )
    image: str = Field(None, description='A Base64-encoded string representing the image you wish to expand')


class BFLFluxFillImageRequest(BaseModel):
    prompt: str = Field(..., description='The description of the changes you want to make. This text guides the expansion process, allowing you to specify features, styles, or modifications for the expanded areas.')
    prompt_upsampling: Optional[bool] = Field(
        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
    )
    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
    steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
    guidance: confloat(ge=1.5, le=100) = Field(..., description='Guidance strength for the image generation process')
    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.'
    )
    output_format: Optional[BFLOutputFormat] = Field(
        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
    )
    image: str = Field(None, description='A Base64-encoded string representing the image you wish to modify. Can contain alpha mask if desired.')
    mask: str = Field(None, description='A Base64-encoded string representing the mask of the areas you wish to modify.')


class BFLFluxCannyImageRequest(BaseModel):
    prompt: str = Field(..., description='Text prompt for image generation')
    prompt_upsampling: Optional[bool] = Field(
        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
    )
    canny_low_threshold: Optional[int] = Field(None, description='Low threshold for Canny edge detection')
    canny_high_threshold: Optional[int] = Field(None, description='High threshold for Canny edge detection')
    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
    steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
    guidance: confloat(ge=1, le=100) = Field(..., description='Guidance strength for the image generation process')
    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.'
    )
    output_format: Optional[BFLOutputFormat] = Field(
        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
    )
    control_image: Optional[str] = Field(None, description='Base64 encoded image to use as control input if no preprocessed image is provided')
    preprocessed_image: Optional[str] = Field(None, description='Optional pre-processed image that will bypass the control preprocessing step')


class BFLFluxDepthImageRequest(BaseModel):
    prompt: str = Field(..., description='Text prompt for image generation')
    prompt_upsampling: Optional[bool] = Field(
        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
    )
    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
    steps: conint(ge=15, le=50) = Field(..., description='Number of steps for the image generation process')
    guidance: confloat(ge=1, le=100) = Field(..., description='Guidance strength for the image generation process')
    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.'
    )
    output_format: Optional[BFLOutputFormat] = Field(
        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
    )
    control_image: Optional[str] = Field(None, description='Base64 encoded image to use as control input if no preprocessed image is provided')
    preprocessed_image: Optional[str] = Field(None, description='Optional pre-processed image that will bypass the control preprocessing step')


class BFLFluxProGenerateRequest(BaseModel):
    prompt: str = Field(..., description='The text prompt for image generation.')
    prompt_upsampling: Optional[bool] = Field(
        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
    )
    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
    width: conint(ge=256, le=1440) = Field(1024, description='Width of the generated image in pixels. Must be a multiple of 32.')
    height: conint(ge=256, le=1440) = Field(768, description='Height of the generated image in pixels. Must be a multiple of 32.')
    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.'
    )
    output_format: Optional[BFLOutputFormat] = Field(
        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
    )
    image_prompt: Optional[str] = Field(None, description='Optional image to remix in base64 format')
    # image_prompt_strength: Optional[confloat(ge=0.0, le=1.0)] = Field(
    #     None, description='Blend between the prompt and the image prompt.'
    # )


class BFLFluxProUltraGenerateRequest(BaseModel):
    prompt: str = Field(..., description='The text prompt for image generation.')
    prompt_upsampling: Optional[bool] = Field(
        None, description='Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.'
    )
    seed: Optional[int] = Field(None, description='The seed value for reproducibility.')
    aspect_ratio: Optional[str] = Field(None, description='Aspect ratio of the image between 21:9 and 9:21.')
    safety_tolerance: Optional[conint(ge=0, le=6)] = Field(
        6, description='Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict. Defaults to 2.'
    )
    output_format: Optional[BFLOutputFormat] = Field(
        BFLOutputFormat.png, description="Output format for the generated image. Can be 'jpeg' or 'png'.", examples=['png']
    )
    raw: Optional[bool] = Field(None, description='Generate less processed, more natural-looking images.')
    image_prompt: Optional[str] = Field(None, description='Optional image to remix in base64 format')
    image_prompt_strength: Optional[confloat(ge=0.0, le=1.0)] = Field(
        None, description='Blend between the prompt and the image prompt.'
    )


class BFLFluxProGenerateResponse(BaseModel):
    id: str = Field(..., description='The unique identifier for the generation task.')
    polling_url: str = Field(..., description='URL to poll for the generation result.')


class BFLStatus(str, Enum):
    task_not_found = "Task not found"
    pending = "Pending"
    request_moderated = "Request Moderated"
    content_moderated = "Content Moderated"
    ready = "Ready"
    error = "Error"


class BFLFluxProStatusResponse(BaseModel):
    id: str = Field(..., description="The unique identifier for the generation task.")
    status: BFLStatus = Field(..., description="The status of the task.")
    result: Optional[Dict[str, Any]] = Field(
        None, description="The result of the task (null if not completed)."
    )
    progress: confloat(ge=0.0, le=1.0) = Field(
        ..., description="The progress of the task (0.0 to 1.0)."
    )
    details: Optional[Dict[str, Any]] = Field(
        None, description="Additional details about the task (null if not available)."
    )
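Note (illustration, not part of this file): these request models serialize directly to JSON request bodies. A minimal sketch, assuming only the models above; endpoint wiring lives elsewhere in the commit:

request = BFLFluxProUltraGenerateRequest(
    prompt="a lighthouse at dusk",
    seed=42,
    aspect_ratio="16:9",
    output_format=BFLOutputFormat.png,
)
payload = request.model_dump(exclude_none=True)  # dict ready to POST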
@@ -1,5 +1,3 @@
-import logging
-
"""
API Client Framework for api.comfy.org.

@@ -46,24 +44,71 @@ operation = ApiOperation(
)
user_profile = operation.execute(client=api_client)  # Returns immediately with the result


# Example 2: Asynchronous API Operation with Polling
# -------------------------------------------------
# For an API that starts a task and requires polling for completion:

# 1. Define the endpoints (initial request and polling)
generate_image_endpoint = ApiEndpoint(
    path="/v1/images/generate",
    method=HttpMethod.POST,
    request_model=ImageGenerationRequest,
    response_model=TaskCreatedResponse,
    query_params=None
)

check_task_endpoint = ApiEndpoint(
    path="/v1/tasks/{task_id}",
    method=HttpMethod.GET,
    request_model=EmptyRequest,
    response_model=ImageGenerationResult,
    query_params=None
)

# 2. Create the request object
request = ImageGenerationRequest(
    prompt="a beautiful sunset over mountains",
    width=1024,
    height=1024,
    num_images=1
)

# 3. Create and execute the polling operation
operation = PollingOperation(
    initial_endpoint=generate_image_endpoint,
    initial_request=request,
    poll_endpoint=check_task_endpoint,
    task_id_field="task_id",
    status_field="status",
    completed_statuses=["completed"],
    failed_statuses=["failed", "error"]
)

# This will make the initial request and then poll until completion
result = operation.execute(client=api_client)  # Returns the final ImageGenerationResult when done
"""

-from typing import (
-    Dict,
-    Type,
-    Optional,
-    Any,
-    TypeVar,
-    Generic,
-)
-from pydantic import BaseModel
+from __future__ import annotations
+import logging
+import time
+import io
+from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable
+from enum import Enum
+import json
+import requests
+from urllib.parse import urljoin
+from pydantic import BaseModel, Field
+
+from comfy.cli_args import args
+from comfy import utils

T = TypeVar("T", bound=BaseModel)
R = TypeVar("R", bound=BaseModel)
P = TypeVar("P", bound=BaseModel)  # For poll response

PROGRESS_BAR_MAX = 100

class EmptyRequest(BaseModel):
    """Base class for empty request bodies.
@@ -72,6 +117,19 @@ class EmptyRequest(BaseModel):
    pass


class UploadRequest(BaseModel):
    file_name: str = Field(..., description="Filename to upload")
    content_type: str | None = Field(
        None,
        description="Mime type of the file. For example: image/png, image/jpeg, video/mp4, etc.",
    )


class UploadResponse(BaseModel):
    download_url: str = Field(..., description="URL to GET uploaded file")
    upload_url: str = Field(..., description="URL to PUT file to upload")


class HttpMethod(str, Enum):
    GET = "GET"
    POST = "POST"
@@ -89,7 +147,7 @@ class ApiClient:
        self,
        base_url: str,
        api_key: Optional[str] = None,
-        timeout: float = 30.0,
+        timeout: float = 3600.0,
        verify_ssl: bool = True,
    ):
        self.base_url = base_url
@@ -97,6 +155,48 @@ class ApiClient:
        self.timeout = timeout
        self.verify_ssl = verify_ssl

    def _create_json_payload_args(
        self,
        data: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Any]:
        return {
            "json": data,
            "headers": headers,
        }

    def _create_form_data_args(
        self,
        data: Dict[str, Any],
        files: Dict[str, Any],
        headers: Optional[Dict[str, str]] = None,
        multipart_parser = None,
    ) -> Dict[str, Any]:
        if headers and "Content-Type" in headers:
            del headers["Content-Type"]

        if multipart_parser:
            data = multipart_parser(data)

        return {
            "data": data,
            "files": files,
            "headers": headers,
        }

    def _create_urlencoded_form_data_args(
        self,
        data: Dict[str, Any],
        headers: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Any]:
        headers = headers or {}
        headers["Content-Type"] = "application/x-www-form-urlencoded"

        return {
            "data": data,
            "headers": headers,
        }

    def get_headers(self) -> Dict[str, str]:
        """Get headers for API requests, including authentication if available"""
        headers = {"Content-Type": "application/json", "Accept": "application/json"}
@@ -111,9 +211,11 @@ class ApiClient:
        method: str,
        path: str,
        params: Optional[Dict[str, Any]] = None,
-        json: Optional[Dict[str, Any]] = None,
+        data: Optional[Dict[str, Any]] = None,
        files: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
+        content_type: str = "application/json",
+        multipart_parser: Callable = None,
    ) -> Dict[str, Any]:
        """
        Make an HTTP request to the API
@@ -122,9 +224,10 @@ class ApiClient:
            method: HTTP method (GET, POST, etc.)
            path: API endpoint path (will be joined with base_url)
            params: Query parameters
-            json: JSON body data
+            data: body data
            files: Files to upload
            headers: Additional headers
+            content_type: Content type of the request. Defaults to application/json.

        Returns:
            Parsed JSON response
@@ -146,34 +249,26 @@ class ApiClient:
        logging.debug(f"[DEBUG] Request Headers: {request_headers}")
        logging.debug(f"[DEBUG] Files: {files}")
        logging.debug(f"[DEBUG] Params: {params}")
-        logging.debug(f"[DEBUG] Json: {json}")
+        logging.debug(f"[DEBUG] Data: {data}")

+        if content_type == "application/x-www-form-urlencoded":
+            payload_args = self._create_urlencoded_form_data_args(data, request_headers)
+        elif content_type == "multipart/form-data":
+            payload_args = self._create_form_data_args(
+                data, files, request_headers, multipart_parser
+            )
+        else:
+            payload_args = self._create_json_payload_args(data, request_headers)

        try:
-            # If files are present, use data parameter instead of json
-            if files:
-                form_data = {}
-                if json:
-                    form_data.update(json)
-                response = requests.request(
-                    method=method,
-                    url=url,
-                    params=params,
-                    data=form_data,  # Use data instead of json
-                    files=files,
-                    headers=request_headers,
-                    timeout=self.timeout,
-                    verify=self.verify_ssl,
-                )
-            else:
-                response = requests.request(
-                    method=method,
-                    url=url,
-                    params=params,
-                    json=json,
-                    headers=request_headers,
-                    timeout=self.timeout,
-                    verify=self.verify_ssl,
-                )
+            response = requests.request(
+                method=method,
+                url=url,
+                params=params,
+                timeout=self.timeout,
+                verify=self.verify_ssl,
+                **payload_args,
+            )

            # Raise exception for error status codes
            response.raise_for_status()
@@ -203,7 +298,9 @@ class ApiClient:
                error_message = f"API Error: {error_json}"
        except Exception as json_error:
            # If we can't parse the JSON, fall back to the original error message
-            logging.debug(f"[DEBUG] Failed to parse error response: {str(json_error)}")
+            logging.debug(
+                f"[DEBUG] Failed to parse error response: {str(json_error)}"
+            )

        logging.debug(f"[DEBUG] API Error: {error_message} (Status: {status_code})")
        if hasattr(e, "response") and e.response.content:
@@ -229,6 +326,32 @@ class ApiClient:
            raise Exception("Unauthorized: Please login first to use this node.")
        return auth_token

    @staticmethod
    def upload_file(
        upload_url: str,
        file: io.BytesIO | str,
        content_type: str | None = None,
    ):
        """Upload a file to the API. Make sure the file has a filename equal to what the url expects.

        Args:
            upload_url: The URL to upload to
            file: Either a file path string or a BytesIO object
            content_type: Optional mime type to set for the upload
        """
        headers = {}
        if content_type:
            headers["Content-Type"] = content_type

        if isinstance(file, io.BytesIO):
            file.seek(0)  # Ensure we're at the start of the file
            data = file.read()
            return requests.put(upload_url, data=data, headers=headers)
        elif isinstance(file, str):
            with open(file, "rb") as f:
                data = f.read()
            return requests.put(upload_url, data=data, headers=headers)

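Note (illustration, not part of the diff): a short usage sketch for upload_file; the presigned URL below is a placeholder, not a real endpoint:

import io

buf = io.BytesIO(b"<png bytes>")
resp = ApiClient.upload_file(
    "https://storage.example.com/presigned-put-url",  # placeholder URL
    buf,
    content_type="image/png",
)
resp.raise_for_status()  # upload_file returns the requests.Response from the PUT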
class ApiEndpoint(Generic[T, R]):
    """Defines an API endpoint with its request and response types"""
@@ -267,27 +390,29 @@ class SynchronousOperation(Generic[T, R]):
        endpoint: ApiEndpoint[T, R],
        request: T,
        files: Optional[Dict[str, Any]] = None,
-        api_base: str = "https://api.comfy.org",
+        api_base: str | None = None,
        auth_token: Optional[str] = None,
        timeout: float = 604800.0,
        verify_ssl: bool = True,
+        content_type: str = "application/json",
+        multipart_parser: Callable = None,
    ):
        self.endpoint = endpoint
        self.request = request
        self.response = None
        self.error = None
-        self.api_base = api_base
+        self.api_base: str = api_base or args.comfy_api_base
        self.auth_token = auth_token
        self.timeout = timeout
        self.verify_ssl = verify_ssl
        self.files = files
+        self.content_type = content_type
+        self.multipart_parser = multipart_parser

    def execute(self, client: Optional[ApiClient] = None) -> R:
        """Execute the API operation using the provided client or create one"""
        try:
            # Create client if not provided
            if client is None:
                if self.api_base is None:
                    raise ValueError("Either client or api_base must be provided")
                client = ApiClient(
                    base_url=self.api_base,
                    api_key=self.auth_token,
@@ -296,14 +421,25 @@ class SynchronousOperation(Generic[T, R]):
            )

            # Convert request model to dict, but use None for EmptyRequest
-            request_dict = None if isinstance(self.request, EmptyRequest) else self.request.model_dump(exclude_none=True)
+            request_dict = (
+                None
+                if isinstance(self.request, EmptyRequest)
+                else self.request.model_dump(exclude_none=True)
+            )
            if request_dict:
                for key, value in request_dict.items():
                    if isinstance(value, Enum):
                        request_dict[key] = value.value

            # Debug log for request
-            logging.debug(f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}")
+            logging.debug(
+                f"[DEBUG] API Request: {self.endpoint.method.value} {self.endpoint.path}"
+            )
            logging.debug(f"[DEBUG] Request Data: {json.dumps(request_dict, indent=2)}")
            logging.debug(f"[DEBUG] Query Params: {self.endpoint.query_params}")

@@ -311,9 +447,11 @@ class SynchronousOperation(Generic[T, R]):
            resp = client.request(
                method=self.endpoint.method.value,
                path=self.endpoint.path,
-                json=request_dict,
+                data=request_dict,
                params=self.endpoint.query_params,
                files=self.files,
+                content_type=self.content_type,
+                multipart_parser=self.multipart_parser
            )

            # Debug log for response
@@ -327,7 +465,7 @@ class SynchronousOperation(Generic[T, R]):
            return self._parse_response(resp)

        except Exception as e:
-            logging.debug(f"[DEBUG] API Exception: {str(e)}")
+            logging.error(f"[DEBUG] API Exception: {str(e)}")
            raise Exception(str(e))

    def _parse_response(self, resp):
@@ -339,3 +477,140 @@ class SynchronousOperation(Generic[T, R]):
        self.response = self.endpoint.response_model.model_validate(resp)
        logging.debug(f"[DEBUG] Parsed Response: {self.response}")
        return self.response


class TaskStatus(str, Enum):
    """Enum for task status values"""

    COMPLETED = "completed"
    FAILED = "failed"
    PENDING = "pending"


class PollingOperation(Generic[T, R]):
    """
    Represents an asynchronous API operation that requires polling for completion.
    """

    def __init__(
        self,
        poll_endpoint: ApiEndpoint[EmptyRequest, R],
        completed_statuses: list,
        failed_statuses: list,
        status_extractor: Callable[[R], str],
        progress_extractor: Callable[[R], float] = None,
        request: Optional[T] = None,
        api_base: str | None = None,
        auth_token: Optional[str] = None,
        poll_interval: float = 5.0,
    ):
        self.poll_endpoint = poll_endpoint
        self.request = request
        self.api_base: str = api_base or args.comfy_api_base
        self.auth_token = auth_token
        self.poll_interval = poll_interval

        # Polling configuration
        self.status_extractor = status_extractor or (
            lambda x: getattr(x, "status", None)
        )
        self.progress_extractor = progress_extractor
        self.completed_statuses = completed_statuses
        self.failed_statuses = failed_statuses

        # For storing response data
        self.final_response = None
        self.error = None

    def execute(self, client: Optional[ApiClient] = None) -> R:
        """Execute the polling operation using the provided client. If failed, raise an exception."""
        try:
            if client is None:
                client = ApiClient(
                    base_url=self.api_base,
                    api_key=self.auth_token,
                )
            return self._poll_until_complete(client)
        except Exception as e:
            raise Exception(f"Error during polling: {str(e)}")

    def _check_task_status(self, response: R) -> TaskStatus:
        """Check task status using the status extractor function"""
        try:
            status = self.status_extractor(response)
            if status in self.completed_statuses:
                return TaskStatus.COMPLETED
            elif status in self.failed_statuses:
                return TaskStatus.FAILED
            return TaskStatus.PENDING
        except Exception as e:
            logging.error(f"Error extracting status: {e}")
            return TaskStatus.PENDING

    def _poll_until_complete(self, client: ApiClient) -> R:
        """Poll until the task is complete"""
        poll_count = 0
        if self.progress_extractor:
            progress = utils.ProgressBar(PROGRESS_BAR_MAX)

        while True:
            try:
                poll_count += 1
                logging.debug(f"[DEBUG] Polling attempt #{poll_count}")

                request_dict = (
                    self.request.model_dump(exclude_none=True)
                    if self.request is not None
                    else None
                )

                if poll_count == 1:
                    logging.debug(
                        f"[DEBUG] Poll Request: {self.poll_endpoint.method.value} {self.poll_endpoint.path}"
                    )
                    logging.debug(
                        f"[DEBUG] Poll Request Data: {json.dumps(request_dict, indent=2) if request_dict else 'None'}"
                    )

                # Query task status
                resp = client.request(
                    method=self.poll_endpoint.method.value,
                    path=self.poll_endpoint.path,
                    params=self.poll_endpoint.query_params,
                    data=request_dict,
                )

                # Parse response
                response_obj = self.poll_endpoint.response_model.model_validate(resp)

                # Check if task is complete
                status = self._check_task_status(response_obj)
                logging.debug(f"[DEBUG] Task Status: {status}")

                # If progress extractor is provided, extract progress
                if self.progress_extractor:
                    new_progress = self.progress_extractor(response_obj)
                    if new_progress is not None:
                        progress.update_absolute(new_progress, total=PROGRESS_BAR_MAX)

                if status == TaskStatus.COMPLETED:
                    logging.debug("[DEBUG] Task completed successfully")
                    self.final_response = response_obj
                    if self.progress_extractor:
                        progress.update(100)
                    return self.final_response
                elif status == TaskStatus.FAILED:
                    message = f"Task failed: {json.dumps(resp)}"
                    logging.error(f"[DEBUG] {message}")
                    raise Exception(message)
                else:
                    logging.debug("[DEBUG] Task still pending, continuing to poll...")

                # Wait before polling again
                logging.debug(
                    f"[DEBUG] Waiting {self.poll_interval} seconds before next poll"
                )
                time.sleep(self.poll_interval)

            except Exception as e:
                logging.error(f"[DEBUG] Polling error: {str(e)}")
                raise Exception(f"Error while polling: {str(e)}")
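Note (illustration, not part of the commit): how SynchronousOperation and PollingOperation compose with the BFL models added earlier. The endpoint paths and the token value are assumptions made for this sketch, not confirmed by the diff:

auth_token = "<user token>"  # placeholder

generate = ApiEndpoint(
    path="/proxy/bfl/flux-pro-1.1-ultra/generate",  # assumed path
    method=HttpMethod.POST,
    request_model=BFLFluxProUltraGenerateRequest,
    response_model=BFLFluxProGenerateResponse,
    query_params=None,
)
submitted = SynchronousOperation(
    endpoint=generate,
    request=BFLFluxProUltraGenerateRequest(prompt="a red fox"),
    auth_token=auth_token,
).execute()  # returns a BFLFluxProGenerateResponse with id and polling_url

poll = ApiEndpoint(
    path="/proxy/bfl/get_result",  # assumed path
    method=HttpMethod.GET,
    request_model=EmptyRequest,
    response_model=BFLFluxProStatusResponse,
    query_params=None,
)
result = PollingOperation(
    poll_endpoint=poll,
    completed_statuses=[BFLStatus.ready],
    failed_statuses=[BFLStatus.error, BFLStatus.content_moderated, BFLStatus.request_moderated],
    status_extractor=lambda r: r.status,
    progress_extractor=lambda r: r.progress * PROGRESS_BAR_MAX,  # 0..1 scaled to percent
    auth_token=auth_token,
).execute()  # polls every 5 s until the status is Ready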
comfy_api_nodes/apis/luma_api.py (new file, 253 lines)
@@ -0,0 +1,253 @@
from __future__ import annotations

import torch

from enum import Enum
from typing import Optional, Union

from pydantic import BaseModel, Field, confloat


class LumaIO:
    LUMA_REF = "LUMA_REF"
    LUMA_CONCEPTS = "LUMA_CONCEPTS"


class LumaReference:
    def __init__(self, image: torch.Tensor, weight: float):
        self.image = image
        self.weight = weight

    def create_api_model(self, download_url: str):
        return LumaImageRef(url=download_url, weight=self.weight)


class LumaReferenceChain:
    def __init__(self, first_ref: LumaReference=None):
        self.refs: list[LumaReference] = []
        if first_ref:
            self.refs.append(first_ref)

    def add(self, luma_ref: LumaReference=None):
        self.refs.append(luma_ref)

    def create_api_model(self, download_urls: list[str], max_refs=4):
        if len(self.refs) == 0:
            return None
        api_refs: list[LumaImageRef] = []
        for ref, url in zip(self.refs, download_urls):
            api_ref = LumaImageRef(url=url, weight=ref.weight)
            api_refs.append(api_ref)
        return api_refs

    def clone(self):
        c = LumaReferenceChain()
        for ref in self.refs:
            c.add(ref)
        return c


class LumaConcept:
    def __init__(self, key: str):
        self.key = key


class LumaConceptChain:
    def __init__(self, str_list: list[str] = None):
        self.concepts: list[LumaConcept] = []
        if str_list is not None:
            for c in str_list:
                if c != "None":
                    self.add(LumaConcept(key=c))

    def add(self, concept: LumaConcept):
        self.concepts.append(concept)

    def create_api_model(self):
        if len(self.concepts) == 0:
            return None
        api_concepts: list[LumaConceptObject] = []
        for concept in self.concepts:
            if concept.key == "None":
                continue
            api_concepts.append(LumaConceptObject(key=concept.key))
        if len(api_concepts) == 0:
            return None
        return api_concepts

    def clone(self):
        c = LumaConceptChain()
        for concept in self.concepts:
            c.add(concept)
        return c

    def clone_and_merge(self, other: LumaConceptChain):
        c = self.clone()
        for concept in other.concepts:
            c.add(concept)
        return c


def get_luma_concepts(include_none=False):
    concepts = []
    if include_none:
        concepts.append("None")
    return concepts + [
        "truck_left",
        "pan_right",
        "pedestal_down",
        "low_angle",
        "pedestal_up",
        "selfie",
        "pan_left",
        "roll_right",
        "zoom_in",
        "over_the_shoulder",
        "orbit_right",
        "orbit_left",
        "static",
        "tiny_planet",
        "high_angle",
        "bolt_cam",
        "dolly_zoom",
        "overhead",
        "zoom_out",
        "handheld",
        "roll_left",
        "pov",
        "aerial_drone",
        "push_in",
        "crane_down",
        "truck_right",
        "tilt_down",
        "elevator_doors",
        "tilt_up",
        "ground_level",
        "pull_out",
        "aerial",
        "crane_up",
        "eye_level"
    ]


class LumaImageModel(str, Enum):
    photon_1 = "photon-1"
    photon_flash_1 = "photon-flash-1"


class LumaVideoModel(str, Enum):
    ray_2 = "ray-2"
    ray_flash_2 = "ray-flash-2"
    ray_1_6 = "ray-1-6"


class LumaAspectRatio(str, Enum):
    ratio_1_1 = "1:1"
    ratio_16_9 = "16:9"
    ratio_9_16 = "9:16"
    ratio_4_3 = "4:3"
    ratio_3_4 = "3:4"
    ratio_21_9 = "21:9"
    ratio_9_21 = "9:21"


class LumaVideoOutputResolution(str, Enum):
    res_540p = "540p"
    res_720p = "720p"
    res_1080p = "1080p"
    res_4k = "4k"


class LumaVideoModelOutputDuration(str, Enum):
    dur_5s = "5s"
    dur_9s = "9s"


class LumaGenerationType(str, Enum):
    video = 'video'
    image = 'image'


class LumaState(str, Enum):
    queued = "queued"
    dreaming = "dreaming"
    completed = "completed"
    failed = "failed"


class LumaAssets(BaseModel):
    video: Optional[str] = Field(None, description='The URL of the video')
    image: Optional[str] = Field(None, description='The URL of the image')
    progress_video: Optional[str] = Field(None, description='The URL of the progress video')


class LumaImageRef(BaseModel):
    '''Used for image gen'''
    url: str = Field(..., description='The URL of the image reference')
    weight: confloat(ge=0.0, le=1.0) = Field(..., description='The weight of the image reference')


class LumaImageReference(BaseModel):
    '''Used for video gen'''
    type: Optional[str] = Field('image', description='Input type, defaults to image')
    url: str = Field(..., description='The URL of the image')


class LumaModifyImageRef(BaseModel):
    url: str = Field(..., description='The URL of the image reference')
    weight: confloat(ge=0.0, le=1.0) = Field(..., description='The weight of the image reference')


class LumaCharacterRef(BaseModel):
    identity0: LumaImageIdentity = Field(..., description='The image identity object')


class LumaImageIdentity(BaseModel):
    images: list[str] = Field(..., description='The URLs of the image identity')


class LumaGenerationReference(BaseModel):
    type: str = Field('generation', description='Input type, defaults to generation')
    id: str = Field(..., description='The ID of the generation')


class LumaKeyframes(BaseModel):
    frame0: Optional[Union[LumaImageReference, LumaGenerationReference]] = Field(None, description='')
    frame1: Optional[Union[LumaImageReference, LumaGenerationReference]] = Field(None, description='')


class LumaConceptObject(BaseModel):
    key: str = Field(..., description='Camera Concept name')


class LumaImageGenerationRequest(BaseModel):
    prompt: str = Field(..., description='The prompt of the generation')
    model: LumaImageModel = Field(LumaImageModel.photon_1, description='The image model used for the generation')
    aspect_ratio: Optional[LumaAspectRatio] = Field(LumaAspectRatio.ratio_16_9, description='The aspect ratio of the generation')
    image_ref: Optional[list[LumaImageRef]] = Field(None, description='List of image reference objects')
    style_ref: Optional[list[LumaImageRef]] = Field(None, description='List of style reference objects')
    character_ref: Optional[LumaCharacterRef] = Field(None, description='The image identity object')
    modify_image_ref: Optional[LumaModifyImageRef] = Field(None, description='The modify image reference object')


class LumaGenerationRequest(BaseModel):
    prompt: str = Field(..., description='The prompt of the generation')
    model: LumaVideoModel = Field(LumaVideoModel.ray_2, description='The video model used for the generation')
    duration: Optional[LumaVideoModelOutputDuration] = Field(None, description='The duration of the generation')
    aspect_ratio: Optional[LumaAspectRatio] = Field(None, description='The aspect ratio of the generation')
    resolution: Optional[LumaVideoOutputResolution] = Field(None, description='The resolution of the generation')
    loop: Optional[bool] = Field(None, description='Whether to loop the video')
    keyframes: Optional[LumaKeyframes] = Field(None, description='The keyframes of the generation')
    concepts: Optional[list[LumaConceptObject]] = Field(None, description='Camera Concepts to apply to generation')


class LumaGeneration(BaseModel):
    id: str = Field(..., description='The ID of the generation')
    generation_type: LumaGenerationType = Field(..., description='Generation type, image or video')
    state: LumaState = Field(..., description='The state of the generation')
    failure_reason: Optional[str] = Field(None, description='The reason for the state of the generation')
    created_at: str = Field(..., description='The date and time when the generation was created')
    assets: Optional[LumaAssets] = Field(None, description='The assets of the generation')
    model: str = Field(..., description='The model used for the generation')
    request: Union[LumaGenerationRequest, LumaImageGenerationRequest] = Field(..., description="The request used for the generation")
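Note (illustration, not part of this file): building a Ray 2 text-to-video request from these models; the prompt and settings are arbitrary:

req = LumaGenerationRequest(
    prompt="a koi pond at dawn",
    model=LumaVideoModel.ray_2,
    resolution=LumaVideoOutputResolution.res_720p,
    duration=LumaVideoModelOutputDuration.dur_5s,
    aspect_ratio=LumaAspectRatio.ratio_16_9,
    concepts=LumaConceptChain(str_list=["dolly_zoom"]).create_api_model(),
)
body = req.model_dump(exclude_none=True)  # JSON-ready dict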
comfy_api_nodes/apis/pixverse_api.py (new file, 146 lines)
@@ -0,0 +1,146 @@
from __future__ import annotations

from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field


pixverse_templates = {
    "Microwave": 324641385496960,
    "Suit Swagger": 328545151283968,
    "Anything, Robot": 313358700761536,
    "Subject 3 Fever": 327828816843648,
    "kiss kiss": 315446315336768,
}


class PixverseIO:
    TEMPLATE = "PIXVERSE_TEMPLATE"


class PixverseStatus(int, Enum):
    successful = 1
    generating = 5
    deleted = 6
    contents_moderation = 7
    failed = 8


class PixverseAspectRatio(str, Enum):
    ratio_16_9 = "16:9"
    ratio_4_3 = "4:3"
    ratio_1_1 = "1:1"
    ratio_3_4 = "3:4"
    ratio_9_16 = "9:16"


class PixverseQuality(str, Enum):
    res_360p = "360p"
    res_540p = "540p"
    res_720p = "720p"
    res_1080p = "1080p"


class PixverseDuration(int, Enum):
    dur_5 = 5
    dur_8 = 8


class PixverseMotionMode(str, Enum):
    normal = "normal"
    fast = "fast"


class PixverseStyle(str, Enum):
    anime = "anime"
    animation_3d = "3d_animation"
    clay = "clay"
    comic = "comic"
    cyberpunk = "cyberpunk"


# NOTE: forgoing descriptions for now in return for dev speed
class PixverseTextVideoRequest(BaseModel):
    aspect_ratio: PixverseAspectRatio = Field(...)
    quality: PixverseQuality = Field(...)
    duration: PixverseDuration = Field(...)
    model: Optional[str] = Field("v3.5")
    motion_mode: Optional[PixverseMotionMode] = Field(PixverseMotionMode.normal)
    prompt: str = Field(...)
    negative_prompt: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)
    style: Optional[str] = Field(None)
    template_id: Optional[int] = Field(None)
    water_mark: Optional[bool] = Field(None)


class PixverseImageVideoRequest(BaseModel):
    quality: PixverseQuality = Field(...)
    duration: PixverseDuration = Field(...)
    img_id: int = Field(...)
    model: Optional[str] = Field("v3.5")
    motion_mode: Optional[PixverseMotionMode] = Field(PixverseMotionMode.normal)
    prompt: str = Field(...)
    negative_prompt: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)
    style: Optional[str] = Field(None)
    template_id: Optional[int] = Field(None)
    water_mark: Optional[bool] = Field(None)


class PixverseTransitionVideoRequest(BaseModel):
    quality: PixverseQuality = Field(...)
    duration: PixverseDuration = Field(...)
    first_frame_img: int = Field(...)
    last_frame_img: int = Field(...)
    model: Optional[str] = Field("v3.5")
    motion_mode: Optional[PixverseMotionMode] = Field(PixverseMotionMode.normal)
    prompt: str = Field(...)
    # negative_prompt: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)
    # style: Optional[str] = Field(None)
    # template_id: Optional[int] = Field(None)
    # water_mark: Optional[bool] = Field(None)


class PixverseImageUploadResponse(BaseModel):
    ErrCode: Optional[int] = None
    ErrMsg: Optional[str] = None
    Resp: Optional[PixverseImgIdResponseObject] = Field(None, alias='Resp')


class PixverseImgIdResponseObject(BaseModel):
    img_id: Optional[int] = None


class PixverseVideoResponse(BaseModel):
    ErrCode: Optional[int] = Field(None)
    ErrMsg: Optional[str] = Field(None)
    Resp: Optional[PixverseVideoIdResponseObject] = Field(None)


class PixverseVideoIdResponseObject(BaseModel):
    video_id: int = Field(..., description='Video_id')


class PixverseGenerationStatusResponse(BaseModel):
    ErrCode: Optional[int] = Field(None)
    ErrMsg: Optional[str] = Field(None)
    Resp: Optional[PixverseGenerationStatusResponseObject] = Field(None)


class PixverseGenerationStatusResponseObject(BaseModel):
    create_time: Optional[str] = Field(None)
    id: Optional[int] = Field(None)
    modify_time: Optional[str] = Field(None)
    negative_prompt: Optional[str] = Field(None)
    outputHeight: Optional[int] = Field(None)
    outputWidth: Optional[int] = Field(None)
    prompt: Optional[str] = Field(None)
    resolution_ratio: Optional[int] = Field(None)
    seed: Optional[int] = Field(None)
    size: Optional[int] = Field(None)
    status: Optional[int] = Field(None)
    style: Optional[str] = Field(None)
    url: Optional[str] = Field(None)
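Note (illustration, not part of this file): a 5-second 720p text-to-video request using one of the bundled templates; values chosen arbitrarily:

req = PixverseTextVideoRequest(
    aspect_ratio=PixverseAspectRatio.ratio_16_9,
    quality=PixverseQuality.res_720p,
    duration=PixverseDuration.dur_5,
    motion_mode=PixverseMotionMode.normal,
    prompt="a cat in a space suit",
    template_id=pixverse_templates["Microwave"],
)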
comfy_api_nodes/apis/recraft_api.py (new file, 263 lines)
@@ -0,0 +1,263 @@
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import BaseModel, Field, conint, confloat
|
||||
|
||||
|
||||
class RecraftColor:
|
||||
def __init__(self, r: int, g: int, b: int):
|
||||
self.color = [r, g, b]
|
||||
|
||||
def create_api_model(self):
|
||||
return RecraftColorObject(rgb=self.color)
|
||||
|
||||
|
||||
class RecraftColorChain:
|
||||
def __init__(self):
|
||||
self.colors: list[RecraftColor] = []
|
||||
|
||||
def get_first(self):
|
||||
if len(self.colors) > 0:
|
||||
return self.colors[0]
|
||||
return None
|
||||
|
||||
def add(self, color: RecraftColor):
|
||||
self.colors.append(color)
|
||||
|
||||
def create_api_model(self):
|
||||
if not self.colors:
|
||||
return None
|
||||
colors_api = [x.create_api_model() for x in self.colors]
|
||||
return colors_api
|
||||
|
||||
def clone(self):
|
||||
c = RecraftColorChain()
|
||||
for color in self.colors:
|
||||
c.add(color)
|
||||
return c
|
||||
|
||||
def clone_and_merge(self, other: RecraftColorChain):
|
||||
c = self.clone()
|
||||
for color in other.colors:
|
||||
c.add(color)
|
||||
return c
|
||||
|
||||
|
||||
class RecraftControls:
|
||||
def __init__(self, colors: RecraftColorChain=None, background_color: RecraftColorChain=None,
|
||||
artistic_level: int=None, no_text: bool=None):
|
||||
self.colors = colors
|
||||
self.background_color = background_color
|
||||
self.artistic_level = artistic_level
|
||||
self.no_text = no_text
|
||||
|
||||
def create_api_model(self):
|
||||
if self.colors is None and self.background_color is None and self.artistic_level is None and self.no_text is None:
|
||||
return None
|
||||
colors_api = None
|
||||
background_color_api = None
|
||||
if self.colors:
|
||||
colors_api = self.colors.create_api_model()
|
||||
if self.background_color:
|
||||
first_background = self.background_color.get_first()
|
||||
background_color_api = first_background.create_api_model() if first_background else None
|
||||
|
||||
return RecraftControlsObject(colors=colors_api, background_color=background_color_api,
|
||||
artistic_level=self.artistic_level, no_text=self.no_text)
|
||||
|
||||
|
||||
class RecraftStyle:
|
||||
def __init__(self, style: str=None, substyle: str=None, style_id: str=None):
|
||||
self.style = style
|
||||
if substyle == "None":
|
||||
substyle = None
|
||||
self.substyle = substyle
|
||||
self.style_id = style_id
|
||||
|
||||
|
||||
class RecraftIO:
|
||||
STYLEV3 = "RECRAFT_V3_STYLE"
|
||||
SVG = "SVG" # TODO: if acceptable, move into ComfyUI's typing class
|
||||
COLOR = "RECRAFT_COLOR"
|
||||
CONTROLS = "RECRAFT_CONTROLS"
|
||||
|
||||
|
||||
class RecraftStyleV3(str, Enum):
|
||||
#any = 'any' NOTE: this does not work for some reason... why?
|
||||
realistic_image = 'realistic_image'
|
||||
digital_illustration = 'digital_illustration'
|
||||
vector_illustration = 'vector_illustration'
|
||||
logo_raster = 'logo_raster'
|
||||
|
||||
|
||||
def get_v3_substyles(style_v3: str, include_none=True) -> list[str]:
|
||||
substyles: list[str] = []
|
||||
if include_none:
|
||||
substyles.append("None")
|
||||
return substyles + dict_recraft_substyles_v3.get(style_v3, [])
|
||||
|
||||
|
||||
dict_recraft_substyles_v3 = {
|
||||
RecraftStyleV3.realistic_image: [
|
||||
"b_and_w",
|
||||
"enterprise",
|
||||
"evening_light",
|
||||
"faded_nostalgia",
|
||||
"forest_life",
|
||||
"hard_flash",
|
||||
"hdr",
|
||||
"motion_blur",
|
||||
"mystic_naturalism",
|
||||
"natural_light",
|
||||
"natural_tones",
|
||||
"organic_calm",
|
||||
"real_life_glow",
|
||||
"retro_realism",
|
||||
"retro_snapshot",
|
||||
"studio_portrait",
|
||||
"urban_drama",
|
||||
"village_realism",
|
||||
"warm_folk"
|
||||
],
|
||||
RecraftStyleV3.digital_illustration: [
|
||||
"2d_art_poster",
|
||||
"2d_art_poster_2",
|
||||
"antiquarian",
|
||||
"bold_fantasy",
|
||||
"child_book",
|
||||
"child_books",
|
||||
"cover",
|
||||
"crosshatch",
|
||||
"digital_engraving",
|
||||
"engraving_color",
|
||||
"expressionism",
|
||||
"freehand_details",
|
||||
"grain",
|
||||
"grain_20",
|
||||
"graphic_intensity",
|
||||
"hand_drawn",
|
||||
"hand_drawn_outline",
|
||||
"handmade_3d",
|
||||
"hard_comics",
|
||||
"infantile_sketch",
|
||||
"long_shadow",
|
||||
"modern_folk",
|
||||
"multicolor",
|
||||
"neon_calm",
|
||||
"noir",
|
||||
"nostalgic_pastel",
|
||||
"outline_details",
|
||||
"pastel_gradient",
|
||||
"pastel_sketch",
|
||||
"pixel_art",
|
||||
"plastic",
|
||||
"pop_art",
|
||||
"pop_renaissance",
|
||||
"seamless",
|
||||
"street_art",
|
||||
"tablet_sketch",
|
||||
"urban_glow",
|
||||
"urban_sketching",
|
||||
"vanilla_dreams",
|
||||
"young_adult_book",
|
||||
"young_adult_book_2"
|
||||
],
|
||||
RecraftStyleV3.vector_illustration: [
|
||||
"bold_stroke",
|
||||
"chemistry",
|
||||
"colored_stencil",
|
||||
"contour_pop_art",
|
||||
"cosmics",
|
||||
"cutout",
|
||||
"depressive",
|
||||
"editorial",
|
||||
"emotional_flat",
|
||||
"engraving",
|
||||
"infographical",
|
||||
"line_art",
|
||||
"line_circuit",
|
||||
"linocut",
|
||||
"marker_outline",
|
||||
"mosaic",
|
||||
"naivector",
|
||||
"roundish_flat",
|
||||
"seamless",
|
||||
"segmented_colors",
|
||||
"sharp_contrast",
|
||||
"thin",
|
||||
"vector_photo",
|
||||
"vivid_shapes"
|
||||
],
|
||||
RecraftStyleV3.logo_raster: [
|
||||
"emblem_graffiti",
|
||||
"emblem_pop_art",
|
||||
"emblem_punk",
|
||||
"emblem_stamp",
|
||||
"emblem_vintage"
|
||||
],
|
||||
}


class RecraftModel(str, Enum):
    recraftv3 = 'recraftv3'
    recraftv2 = 'recraftv2'


class RecraftImageSize(str, Enum):
    res_1024x1024 = '1024x1024'
    res_1365x1024 = '1365x1024'
    res_1024x1365 = '1024x1365'
    res_1536x1024 = '1536x1024'
    res_1024x1536 = '1024x1536'
    res_1820x1024 = '1820x1024'
    res_1024x1820 = '1024x1820'
    res_1024x2048 = '1024x2048'
    res_2048x1024 = '2048x1024'
    res_1434x1024 = '1434x1024'
    res_1024x1434 = '1024x1434'
    res_1024x1280 = '1024x1280'
    res_1280x1024 = '1280x1024'
    res_1024x1707 = '1024x1707'
    res_1707x1024 = '1707x1024'


class RecraftColorObject(BaseModel):
    rgb: list[int] = Field(..., description='An array of 3 integer values in the range 0...255 defining an RGB color')


class RecraftControlsObject(BaseModel):
    colors: Optional[list[RecraftColorObject]] = Field(None, description='An array of preferred colors')
    background_color: Optional[RecraftColorObject] = Field(None, description='Use the given color as the desired background color')
    no_text: Optional[bool] = Field(None, description='Do not embed text layouts')
    artistic_level: Optional[conint(ge=0, le=5)] = Field(None, description='Defines the artistic tone of the image. At the lowest level the subject looks straight at the camera in a static, clean style; higher levels introduce movement and creativity. The value must be in the range [0..5].')
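A minimal sketch of how these controls compose (illustrative values only; Pydantic validates the constrained fields at construction time):

controls = RecraftControlsObject(
    colors=[RecraftColorObject(rgb=[255, 140, 0])],
    background_color=RecraftColorObject(rgb=[255, 255, 255]),
    no_text=True,
    artistic_level=2,  # conint(ge=0, le=5) rejects values outside [0..5]
)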


class RecraftImageGenerationRequest(BaseModel):
    prompt: str = Field(..., description='The text prompt describing the image to generate')
    size: Optional[RecraftImageSize] = Field(None, description='The size of the generated image (e.g., "1024x1024")')
    n: conint(ge=1, le=6) = Field(..., description='The number of images to generate')
    negative_prompt: Optional[str] = Field(None, description='A text description of undesired elements on an image')
    model: Optional[RecraftModel] = Field(RecraftModel.recraftv3, description='The model to use for generation (e.g., "recraftv3")')
    style: Optional[str] = Field(None, description='The style to apply to the generated image (e.g., "digital_illustration")')
    substyle: Optional[str] = Field(None, description='The substyle to apply to the generated image, depending on the style input')
    controls: Optional[RecraftControlsObject] = Field(None, description='A set of custom parameters to tweak the generation process')
    style_id: Optional[str] = Field(None, description='Use a previously uploaded style as a reference; UUID')
    strength: Optional[confloat(ge=0.0, le=1.0)] = Field(None, description='Defines the difference from the original image; must lie in [0, 1], where 0 means almost identical and 1 means minimal similarity')
    random_seed: Optional[int] = Field(None, description='Seed for image generation')
    # text_layout
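A hedged construction example (values are illustrative; `model_dump` assumes Pydantic v2, use `.dict()` on v1):

req = RecraftImageGenerationRequest(
    prompt="a lighthouse at dusk",
    n=1,  # conint(ge=1, le=6): a single request is capped at 6 images
    size=RecraftImageSize.res_1024x1024,
    style=RecraftStyleV3.digital_illustration.value,
    substyle="2d_art_poster",
    controls=controls,  # e.g. the RecraftControlsObject built above
)
payload = req.model_dump(exclude_none=True)  # drop unset optionals before POSTing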


class RecraftReturnedObject(BaseModel):
    image_id: str = Field(..., description='Unique identifier for the generated image')
    url: str = Field(..., description='URL to access the generated image')


class RecraftImageGenerationResponse(BaseModel):
    created: int = Field(..., description='Unix timestamp when the generation was created')
    credits: int = Field(..., description='Number of credits used for the generation')
    data: Optional[list[RecraftReturnedObject]] = Field(None, description='Array of generated image information')
    image: Optional[RecraftReturnedObject] = Field(None, description='Single generated image')
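Because the response may carry either a batch (`data`) or a single image (`image`), a consumer has to check both. A minimal sketch:

def recraft_image_urls(resp: RecraftImageGenerationResponse) -> list[str]:
    # Batch responses populate `data`; some endpoints return a lone `image`.
    if resp.data:
        return [item.url for item in resp.data]
    if resp.image:
        return [resp.image.url]
    return []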

127  comfy_api_nodes/apis/stability_api.py  Normal file
@@ -0,0 +1,127 @@
from __future__ import annotations

from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field, confloat


class StabilityFormat(str, Enum):
    png = 'png'
    jpeg = 'jpeg'
    webp = 'webp'


class StabilityAspectRatio(str, Enum):
    ratio_1_1 = "1:1"
    ratio_16_9 = "16:9"
    ratio_9_16 = "9:16"
    ratio_3_2 = "3:2"
    ratio_2_3 = "2:3"
    ratio_5_4 = "5:4"
    ratio_4_5 = "4:5"
    ratio_21_9 = "21:9"
    ratio_9_21 = "9:21"


def get_stability_style_presets(include_none=True):
    presets = []
    if include_none:
        presets.append("None")
    return presets + [x.value for x in StabilityStylePreset]


class StabilityStylePreset(str, Enum):
    _3d_model = "3d-model"
    analog_film = "analog-film"
    anime = "anime"
    cinematic = "cinematic"
    comic_book = "comic-book"
    digital_art = "digital-art"
    enhance = "enhance"
    fantasy_art = "fantasy-art"
    isometric = "isometric"
    line_art = "line-art"
    low_poly = "low-poly"
    modeling_compound = "modeling-compound"
    neon_punk = "neon-punk"
    origami = "origami"
    photographic = "photographic"
    pixel_art = "pixel-art"
    tile_texture = "tile-texture"


class Stability_SD3_5_Model(str, Enum):
    sd3_5_large = "sd3.5-large"
    # sd3_5_large_turbo = "sd3.5-large-turbo"
    sd3_5_medium = "sd3.5-medium"


class Stability_SD3_5_GenerationMode(str, Enum):
    text_to_image = "text-to-image"
    image_to_image = "image-to-image"


class StabilityStable3_5Request(BaseModel):
    model: str = Field(...)
    mode: str = Field(...)
    prompt: str = Field(...)
    negative_prompt: Optional[str] = Field(None)
    aspect_ratio: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)
    output_format: Optional[str] = Field(StabilityFormat.png.value)
    image: Optional[str] = Field(None)
    style_preset: Optional[str] = Field(None)
    cfg_scale: float = Field(...)
    strength: Optional[confloat(ge=0.0, le=1.0)] = Field(None)
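A sketch of the two modes this request supports (values are illustrative; the assumption, per Stability's docs, is that `image` and `strength` apply only to image-to-image and `aspect_ratio` only to text-to-image):

t2i = StabilityStable3_5Request(
    model=Stability_SD3_5_Model.sd3_5_large.value,
    mode=Stability_SD3_5_GenerationMode.text_to_image.value,
    prompt="a watercolor fox",
    aspect_ratio=StabilityAspectRatio.ratio_16_9.value,
    cfg_scale=4.0,
)

i2i = StabilityStable3_5Request(
    model=Stability_SD3_5_Model.sd3_5_medium.value,
    mode=Stability_SD3_5_GenerationMode.image_to_image.value,
    prompt="the same scene in winter",
    cfg_scale=4.0,
    image="<base64 of the source image>",  # placeholder, not a real payload
    strength=0.6,  # 0 = stay close to the input, 1 = diverge freely
)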


class StabilityUpscaleConservativeRequest(BaseModel):
    prompt: str = Field(...)
    negative_prompt: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)
    output_format: Optional[str] = Field(StabilityFormat.png.value)
    image: Optional[str] = Field(None)
    creativity: Optional[confloat(ge=0.2, le=0.5)] = Field(None)


class StabilityUpscaleCreativeRequest(BaseModel):
    prompt: str = Field(...)
    negative_prompt: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)
    output_format: Optional[str] = Field(StabilityFormat.png.value)
    image: Optional[str] = Field(None)
    creativity: Optional[confloat(ge=0.1, le=0.5)] = Field(None)
    style_preset: Optional[str] = Field(None)
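Note the differing `creativity` bounds: [0.2, 0.5] for the conservative upscale versus [0.1, 0.5] for the creative one. The `confloat` constraints reject out-of-range values at construction time, e.g.:

from pydantic import ValidationError

try:
    StabilityUpscaleCreativeRequest(prompt="restore this photo", creativity=0.7)
except ValidationError:
    pass  # 0.7 exceeds le=0.5, so the model never gets built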


class StabilityStableUltraRequest(BaseModel):
    prompt: str = Field(...)
    negative_prompt: Optional[str] = Field(None)
    aspect_ratio: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)
    output_format: Optional[str] = Field(StabilityFormat.png.value)
    image: Optional[str] = Field(None)
    style_preset: Optional[str] = Field(None)
    strength: Optional[confloat(ge=0.0, le=1.0)] = Field(None)


class StabilityStableUltraResponse(BaseModel):
    image: Optional[str] = Field(None)
    finish_reason: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)


class StabilityResultsGetResponse(BaseModel):
    image: Optional[str] = Field(None)
    finish_reason: Optional[str] = Field(None)
    seed: Optional[int] = Field(None)
    id: Optional[str] = Field(None)
    name: Optional[str] = Field(None)
    errors: Optional[list[str]] = Field(None)
    status: Optional[str] = Field(None)
    result: Optional[str] = Field(None)


class StabilityAsyncResponse(BaseModel):
    id: Optional[str] = Field(None)
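These last two models imply the async flow used by the creative upscale: the kickoff call returns only an `id`, and the client polls a results endpoint until `status` leaves "in-progress". A rough sketch, with `api_get` standing in for whatever authenticated GET helper the caller uses, and the results path assumed from Stability's v2beta API:

import time

def poll_stability_result(generation_id: str, interval: float = 5.0) -> StabilityResultsGetResponse:
    while True:
        raw = api_get(f"/v2beta/results/{generation_id}")  # hypothetical helper
        resp = StabilityResultsGetResponse(**raw)
        if resp.status != "in-progress":
            return resp  # finished: `image`/`result` set, or `errors` on failure
        time.sleep(interval)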