mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-06-08 15:17:14 +00:00
* Add Ideogram generate node. * Add staging api. * Add API_NODE and common error for missing auth token (#5) * Add Minimax Video Generation + Async Task queue polling example (#6) * [Minimax] Show video preview and embed workflow in ouput (#7) * Remove uv.lock * Remove polling operations. * Revert "Remove polling operations." * Update stubs. * Added Ideogram and Minimax back in. * Added initial BFL Flux 1.1 [pro] Ultra node (#11) * Add --comfy-api-base launch arg (#13) * Add instructions for staging development. (#14) * remove validation to make it easier to run against LAN copies of the API * Manually add BFL polling status response schema (#15) * Add function for uploading files. (#18) * Add Luma nodes (#16) * Refactor util functions (#20) * Add VIDEO type (#21) * Add rest of Luma node functionality (#19) * Fix image_luma_ref not working (#28) * [Bug] Remove duplicated option T2V-01 in MinimaxTextToVideoNode (#31) * Add utils to map from pydantic model fields to comfy node inputs (#30) * add veo2, bump av req (#32) * Add Recraft nodes (#29) * Add Kling Nodes (#12) * Add Camera Concepts (luma_concepts) to Luma Video nodes (#33) * Add Runway nodes (#17) * Convert Minimax node to use VIDEO output type (#34) * Standard `CATEGORY` system for api nodes (#35) * Set `Content-Type` header when uploading files (#36) * add better error propagation to veo2 (#37) * Add Realistic Image and Logo Raster styles for Recraft v3 (#38) * Fix runway image upload and progress polling (#39) * Fix image upload for Luma: only include `Content-Type` header field if it's set explicitly (#40) * Moved Luma nodes to nodes_luma.py (#47) * Moved Recraft nodes to nodes_recraft.py (#48) * Add Pixverse nodes (#46) * Move and fix BFL nodes to node_bfl.py (#49) * Move and edit Minimax node to nodes_minimax.py (#50) * Add Minimax Image to Video node + Cleanup (#51) * Add Recraft Text to Vector node, add Save SVG node to handle its output (#53) * Added pixverse_template support to Pixverse Text to 
Video node (#54) * Added Recraft Controls + Recraft Color RGB nodes (#57) * split remaining nodes out of nodes_api, make utility lib, refactor ideogram (#61) * Add types and doctstrings to utils file (#64) * Fix: `PollingOperation` progress bar update progress by absolute value (#65) * Use common download function in kling nodes module (#67) * Fix: Luma video nodes in `api nodes/image` category (#68) * Set request type explicitly (#66) * Add `control_after_generate` to all seed inputs (#69) * Fix bug: deleting `Content-Type` when property does not exist (#73) * Add preview to Save SVG node (#74) * change default poll interval (#76), rework veo2 * Add Pixverse and updated Kling types (#75) * Added Pixverse Image to VIdeo node (#77) * Add Pixverse Transition Video node (#79) * Proper ray-1-6 support as fix has been applied in backend (#80) * Added Recraft Style - Infinite Style Library node (#82) * add ideogram v3 (#83) * [Kling] Split Camera Control config to its own node (#81) * Add Pika i2v and t2v nodes (#52) * Temporary Fix for Runway (#87) * Added Stability Stable Image Ultra node (#86) * Remove Runway nodes (#88) * Fix: Prompt text can't be validated in Kling nodes when using primitive nodes (#90) * Fix: typo in node name "Stabiliy" => "Stability" (#91) * Add String (Multiline) node (#93) * Update Pika Duration and Resolution options (#94) * Change base branch to master. Not main. 
(#95) * Fix UploadRequest file_name param (#98) * Removed Infinite Style Library until later (#99) * fix ideogram style types (#100) * fix multi image return (#101) * add metadata saving to SVG (#102) * Bump templates version to include API node template workflows (#104) * Fix: `download_url_to_video_output` return type (#103) * fix 4o generation bug (#106) * Serve SVG files directly (#107) * Add a bunch of nodes, 3 ready to use, the rest waiting for endpoint support (#108) * Revert "Serve SVG files directly" (#111) * Expose 4 remaining Recraft nodes (#112) * [Kling] Add `Duration` and `Video ID` outputs (#105) * Fix: datamodel-codegen sets string#binary type to non-existent `bytes_aliased` variable (#114) * Fix: Dall-e 2 not setting request content-type dynamically (#113) * Default request timeout: one hour. (#116) * Add Kling nodes: camera control, start-end frame, lip-sync, video extend (#115) * Add 8 nodes - 4 BFL, 4 Stability (#117) * Fix error for Recraft ImageToImage error for nonexistent random_seed param (#118) * Add remaining Pika nodes (#119) * Make controls input work for Recraft Image to Image node (#120) * Use upstream PR: Support saving Comfy VIDEO type to buffer (#123) * Use Upstream PR: "Fix: Error creating video when sliced audio tensor chunks are non-c-contiguous" (#127) * Improve audio upload utils (#128) * Fix: Nested `AnyUrl` in request model cannot be serialized (Kling, Runway) (#129) * Show errors and API output URLs to the user (change log levels) (#131) * Fix: Luma I2I fails when weight is <=0.01 (#132) * Change category of `LumaConcepts` node from image to video (#133) * Fix: `image.shape` accessed before `image` is null-checked (#134) * Apply small fixes and most prompt validation (if needed to avoid API error) (#135) * Node name/category modifications (#140) * Add back Recraft Style - Infinite Style Library node (#141) * Fixed Kling: Check attributes of pydantic types. 
(#144) * Bump `comfyui-workflow-templates` version (#142) * [Kling] Print response data when error validating response (#146) * Fix: error validating Kling image response, trying to use `"key" in` on Pydantic class instance (#147) * [Kling] Fix: Correct/verify supported subset of input combos in Kling nodes (#149) * [Kling] Fix typo in node description (#150) * [Kling] Fix: CFG min/max not being enforced (#151) * Rebase launch-rebase (private) on prep-branch (public copy of master) (#153) * Bump templates version (#154) * Fix: Kling image gen nodes don't return entire batch when `n` > 1 (#152) * Remove pixverse_template from PixVerse Transition Video node (#155) * Invert image_weight value on Luma Image to Image node (#156) * Invert and resize mask for Ideogram V3 node to match masking conventions (#158) * [Kling] Fix: image generation nodes not returning Tuple (#159) * [Bug] [Kling] Fix Kling camera control (#161) * Kling Image Gen v2 + improve node descriptions for Flux/OpenAI (#160) * [Kling] Don't return video_id from dual effect video (#162) * Bump frontend to 1.18.8 (#163) * Use 3.9 compat syntax (#164) * Use Python 3.10 * add example env var * Update templates to 0.1.11 * Bump frontend to 1.18.9 --------- Co-authored-by: Robin Huang <robin.j.huang@gmail.com> Co-authored-by: Christian Byrne <cbyrne@comfy.org> Co-authored-by: thot experiment <94414189+thot-experiment@users.noreply.github.com>
907 lines
30 KiB
Python
907 lines
30 KiB
Python
import io
|
|
from inspect import cleandoc
|
|
from comfy.comfy_types.node_typing import IO, ComfyNodeABC
|
|
from comfy_api_nodes.apis.bfl_api import (
|
|
BFLStatus,
|
|
BFLFluxExpandImageRequest,
|
|
BFLFluxFillImageRequest,
|
|
BFLFluxCannyImageRequest,
|
|
BFLFluxDepthImageRequest,
|
|
BFLFluxProGenerateRequest,
|
|
BFLFluxProUltraGenerateRequest,
|
|
BFLFluxProGenerateResponse,
|
|
)
|
|
from comfy_api_nodes.apis.client import (
|
|
ApiEndpoint,
|
|
HttpMethod,
|
|
SynchronousOperation,
|
|
)
|
|
from comfy_api_nodes.apinode_utils import (
|
|
downscale_image_tensor,
|
|
validate_aspect_ratio,
|
|
process_image_response,
|
|
resize_mask_to_image,
|
|
validate_string,
|
|
)
|
|
|
|
import numpy as np
|
|
from PIL import Image
|
|
import requests
|
|
import torch
|
|
import base64
|
|
import time
|
|
|
|
|
|
def convert_mask_to_image(mask: torch.Tensor):
    """Expand a mask tensor into image form.

    Appends a trailing channel axis and replicates it three times so the
    result has the dimensionality (4 dims) and channel count (3) expected
    of an IMAGE tensor.
    """
    with_channel = mask.unsqueeze(-1)
    return torch.cat((with_channel, with_channel, with_channel), dim=-1)
|
|
|
|
|
|
def handle_bfl_synchronous_operation(
    operation: SynchronousOperation, timeout_bfl_calls=360
):
    """Submit a BFL API operation and block until its image is ready.

    Executes the operation, then polls the polling URL from the submit
    response until the generation finishes or `timeout_bfl_calls` seconds
    elapse. Returns the generated image.
    """
    submit_response: BFLFluxProGenerateResponse = operation.execute()
    return _poll_until_generated(submit_response.polling_url, timeout=timeout_bfl_calls)
|
|
|
|
def _poll_until_generated(polling_url: str, timeout=360):
    """Poll a BFL polling URL until the generation task completes.

    Returns the generated image (via process_image_response) once the task
    reports `ready`.

    Raises:
        Exception: on content/request moderation, an API-reported error,
            repeated 404s, an unexpected status code, or when `timeout`
            seconds elapse without a result.
    """
    # used bfl-comfy-nodes to verify code implementation:
    # https://github.com/black-forest-labs/bfl-comfy-nodes/tree/main
    start_time = time.time()
    retries_404 = 0
    max_retries_404 = 5
    retry_404_seconds = 2
    retry_202_seconds = 2
    retry_pending_seconds = 1
    request = requests.Request(method=HttpMethod.GET, url=polling_url)
    # Reuse one session instead of constructing a new one per iteration.
    session = requests.Session()
    # NOTE: should True loop be replaced with checking if workflow has been interrupted?
    while True:
        # Bug fix: enforce the timeout on every iteration. Previously the
        # elapsed-time check lived in an `elif` after the 200/404/202
        # branches, so a task stuck in `pending` (HTTP 200) or repeatedly
        # returning 202 would be polled forever.
        if time.time() - start_time > timeout:
            raise Exception(
                f"BFL API experienced a timeout; could not return request under {timeout} seconds."
            )
        response = session.send(request.prepare())
        if response.status_code == 200:
            result = response.json()
            if result["status"] == BFLStatus.ready:
                img_url = result["result"]["sample"]
                img_response = requests.get(img_url)
                return process_image_response(img_response)
            elif result["status"] in [
                BFLStatus.request_moderated,
                BFLStatus.content_moderated,
            ]:
                status = result["status"]
                raise Exception(
                    f"BFL API did not return an image due to: {status}."
                )
            elif result["status"] == BFLStatus.error:
                raise Exception(f"BFL API encountered an error: {result}.")
            elif result["status"] == BFLStatus.pending:
                time.sleep(retry_pending_seconds)
                continue
        elif response.status_code == 404:
            # The task may not be registered immediately after submission;
            # retry a bounded number of times before giving up.
            if retries_404 < max_retries_404:
                retries_404 += 1
                time.sleep(retry_404_seconds)
                continue
            raise Exception(
                f"BFL API could not find task after {max_retries_404} tries."
            )
        elif response.status_code == 202:
            # Accepted but not ready yet; wait and poll again.
            time.sleep(retry_202_seconds)
        else:
            raise Exception(f"BFL API encountered an error: {response.json()}")
|
|
|
|
def convert_image_to_base64(image: torch.Tensor):
    """Encode an image tensor as a base64-encoded PNG string.

    The tensor is first downscaled to at most 2048*2048 total pixels; a
    leading batch dimension, if present, is dropped before encoding.
    """
    scaled = downscale_image_tensor(image, total_pixels=2048 * 2048)
    if scaled.ndim > 3:
        # strip the batch dimension, keeping only the first image
        scaled = scaled[0]
    pixels = (scaled.numpy() * 255).astype(np.uint8)
    buffer = io.BytesIO()
    Image.fromarray(pixels).save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode()
|
|
|
|
|
|
class FluxProUltraImageNode(ComfyNodeABC):
    """
    Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.
    """

    # Aspect-ratio bounds (width:height) accepted by the API; checked in
    # VALIDATE_INPUTS and again when the request is built in api_call.
    MINIMUM_RATIO = 1 / 4
    MAXIMUM_RATIO = 4 / 1
    MINIMUM_RATIO_STR = "1:4"
    MAXIMUM_RATIO_STR = "4:1"

    @classmethod
    def INPUT_TYPES(s):
        # Comfy input schema: generation controls, an optional image prompt
        # with blend strength, and the hidden Comfy.org auth token.
        return {
            "required": {
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the image generation",
                    },
                ),
                "prompt_upsampling": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "The random seed used for creating the noise.",
                    },
                ),
                "aspect_ratio": (
                    IO.STRING,
                    {
                        "default": "16:9",
                        "tooltip": "Aspect ratio of image; must be between 1:4 and 4:1.",
                    },
                ),
                "raw": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "When True, generate less processed, more natural-looking images.",
                    },
                ),
            },
            "optional": {
                "image_prompt": (IO.IMAGE,),
                "image_prompt_strength": (
                    IO.FLOAT,
                    {
                        "default": 0.1,
                        "min": 0.0,
                        "max": 1.0,
                        "step": 0.01,
                        "tooltip": "Blend between the prompt and the image prompt.",
                    },
                ),
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    @classmethod
    def VALIDATE_INPUTS(cls, aspect_ratio: str):
        # Returning an error string (rather than raising) is how Comfy
        # surfaces validation failures to the frontend; True means valid.
        try:
            validate_aspect_ratio(
                aspect_ratio,
                minimum_ratio=cls.MINIMUM_RATIO,
                maximum_ratio=cls.MAXIMUM_RATIO,
                minimum_ratio_str=cls.MINIMUM_RATIO_STR,
                maximum_ratio_str=cls.MAXIMUM_RATIO_STR,
            )
        except Exception as e:
            return str(e)
        return True

    RETURN_TYPES = (IO.IMAGE,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/image/BFL"

    def api_call(
        self,
        prompt: str,
        aspect_ratio: str,
        prompt_upsampling=False,
        raw=False,
        seed=0,
        image_prompt=None,
        image_prompt_strength=0.1,
        auth_token=None,
        **kwargs,
    ):
        # The text prompt only needs validation when no image prompt is
        # supplied; with an image prompt, an empty text prompt is allowed.
        if image_prompt is None:
            validate_string(prompt, strip_whitespace=False)
        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/bfl/flux-pro-1.1-ultra/generate",
                method=HttpMethod.POST,
                request_model=BFLFluxProUltraGenerateRequest,
                response_model=BFLFluxProGenerateResponse,
            ),
            request=BFLFluxProUltraGenerateRequest(
                prompt=prompt,
                prompt_upsampling=prompt_upsampling,
                seed=seed,
                aspect_ratio=validate_aspect_ratio(
                    aspect_ratio,
                    minimum_ratio=self.MINIMUM_RATIO,
                    maximum_ratio=self.MAXIMUM_RATIO,
                    minimum_ratio_str=self.MINIMUM_RATIO_STR,
                    maximum_ratio_str=self.MAXIMUM_RATIO_STR,
                ),
                raw=raw,
                # Image prompt and its strength are only sent when an image
                # was provided; the image is uploaded base64-encoded.
                image_prompt=(
                    image_prompt
                    if image_prompt is None
                    else convert_image_to_base64(image_prompt)
                ),
                image_prompt_strength=(
                    None if image_prompt is None else round(image_prompt_strength, 2)
                ),
            ),
            auth_token=auth_token,
        )
        # Blocks until the BFL task finishes (or times out) and returns the image.
        output_image = handle_bfl_synchronous_operation(operation)
        return (output_image,)
|
|
|
|
|
|
|
|
class FluxProImageNode(ComfyNodeABC):
    """
    Generates images synchronously based on prompt and resolution.
    """

    @classmethod
    def INPUT_TYPES(s):
        # Comfy input schema: prompt, explicit width/height (multiples of 32),
        # seed, an optional image prompt, and the hidden Comfy.org auth token.
        return {
            "required": {
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the image generation",
                    },
                ),
                "prompt_upsampling": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                    },
                ),
                "width": (
                    IO.INT,
                    {
                        "default": 1024,
                        "min": 256,
                        "max": 1440,
                        "step": 32,
                    },
                ),
                "height": (
                    IO.INT,
                    {
                        "default": 768,
                        "min": 256,
                        "max": 1440,
                        "step": 32,
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "The random seed used for creating the noise.",
                    },
                ),
            },
            "optional": {
                "image_prompt": (IO.IMAGE,),
                # image_prompt_strength intentionally disabled for this
                # endpoint; kept for reference.
                # "image_prompt_strength": (
                #     IO.FLOAT,
                #     {
                #         "default": 0.1,
                #         "min": 0.0,
                #         "max": 1.0,
                #         "step": 0.01,
                #         "tooltip": "Blend between the prompt and the image prompt.",
                #     },
                # ),
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    RETURN_TYPES = (IO.IMAGE,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/image/BFL"

    def api_call(
        self,
        prompt: str,
        prompt_upsampling,
        width: int,
        height: int,
        seed=0,
        image_prompt=None,
        # image_prompt_strength=0.1,
        auth_token=None,
        **kwargs,
    ):
        # Base64-encode the image prompt only if one was provided.
        image_prompt = (
            image_prompt
            if image_prompt is None
            else convert_image_to_base64(image_prompt)
        )

        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/bfl/flux-pro-1.1/generate",
                method=HttpMethod.POST,
                request_model=BFLFluxProGenerateRequest,
                response_model=BFLFluxProGenerateResponse,
            ),
            request=BFLFluxProGenerateRequest(
                prompt=prompt,
                prompt_upsampling=prompt_upsampling,
                width=width,
                height=height,
                seed=seed,
                image_prompt=image_prompt,
            ),
            auth_token=auth_token,
        )
        # Blocks until the BFL task finishes (or times out) and returns the image.
        output_image = handle_bfl_synchronous_operation(operation)
        return (output_image,)
|
|
|
|
|
|
class FluxProExpandNode(ComfyNodeABC):
    """
    Outpaints image based on prompt.
    """

    @classmethod
    def INPUT_TYPES(s):
        # Comfy input schema: the source image, prompt, per-side pixel
        # expansion amounts, sampling controls, and the hidden auth token.
        return {
            "required": {
                "image": (IO.IMAGE,),
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the image generation",
                    },
                ),
                "prompt_upsampling": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                    },
                ),
                "top": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 2048,
                        "tooltip": "Number of pixels to expand at the top of the image"
                    },
                ),
                "bottom": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 2048,
                        "tooltip": "Number of pixels to expand at the bottom of the image"
                    },
                ),
                "left": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 2048,
                        "tooltip": "Number of pixels to expand at the left side of the image"
                    },
                ),
                "right": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 2048,
                        "tooltip": "Number of pixels to expand at the right side of the image"
                    },
                ),
                "guidance": (
                    IO.FLOAT,
                    {
                        "default": 60,
                        "min": 1.5,
                        "max": 100,
                        "tooltip": "Guidance strength for the image generation process"
                    },
                ),
                "steps": (
                    IO.INT,
                    {
                        "default": 50,
                        "min": 15,
                        "max": 50,
                        "tooltip": "Number of steps for the image generation process"
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "The random seed used for creating the noise.",
                    },
                ),
            },
            "optional": {
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    RETURN_TYPES = (IO.IMAGE,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/image/BFL"

    def api_call(
        self,
        image: torch.Tensor,
        prompt: str,
        prompt_upsampling: bool,
        top: int,
        bottom: int,
        left: int,
        right: int,
        steps: int,
        guidance: float,
        seed=0,
        auth_token=None,
        **kwargs,
    ):
        # The API takes the image base64-encoded (downscaled if too large).
        image = convert_image_to_base64(image)

        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/bfl/flux-pro-1.0-expand/generate",
                method=HttpMethod.POST,
                request_model=BFLFluxExpandImageRequest,
                response_model=BFLFluxProGenerateResponse,
            ),
            request=BFLFluxExpandImageRequest(
                prompt=prompt,
                prompt_upsampling=prompt_upsampling,
                top=top,
                bottom=bottom,
                left=left,
                right=right,
                steps=steps,
                guidance=guidance,
                seed=seed,
                image=image,
            ),
            auth_token=auth_token,
        )
        # Blocks until the BFL task finishes (or times out) and returns the image.
        output_image = handle_bfl_synchronous_operation(operation)
        return (output_image,)
|
|
|
|
|
|
|
|
class FluxProFillNode(ComfyNodeABC):
    """
    Inpaints image based on mask and prompt.
    """

    @classmethod
    def INPUT_TYPES(s):
        # Comfy input schema: source image + inpainting mask, prompt,
        # sampling controls, and the hidden auth token.
        return {
            "required": {
                "image": (IO.IMAGE,),
                "mask": (IO.MASK,),
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the image generation",
                    },
                ),
                "prompt_upsampling": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                    },
                ),
                "guidance": (
                    IO.FLOAT,
                    {
                        "default": 60,
                        "min": 1.5,
                        "max": 100,
                        "tooltip": "Guidance strength for the image generation process"
                    },
                ),
                "steps": (
                    IO.INT,
                    {
                        "default": 50,
                        "min": 15,
                        "max": 50,
                        "tooltip": "Number of steps for the image generation process"
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "The random seed used for creating the noise.",
                    },
                ),
            },
            "optional": {
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    RETURN_TYPES = (IO.IMAGE,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/image/BFL"

    def api_call(
        self,
        image: torch.Tensor,
        mask: torch.Tensor,
        prompt: str,
        prompt_upsampling: bool,
        steps: int,
        guidance: float,
        seed=0,
        auth_token=None,
        **kwargs,
    ):
        # prepare mask: match the image's spatial size, then expand it to a
        # 3-channel image so it can be base64-encoded like a regular image
        mask = resize_mask_to_image(mask, image)
        mask = convert_image_to_base64(convert_mask_to_image(mask))
        # make sure image will have alpha channel removed
        image = convert_image_to_base64(image[:,:,:,:3])

        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/bfl/flux-pro-1.0-fill/generate",
                method=HttpMethod.POST,
                request_model=BFLFluxFillImageRequest,
                response_model=BFLFluxProGenerateResponse,
            ),
            request=BFLFluxFillImageRequest(
                prompt=prompt,
                prompt_upsampling=prompt_upsampling,
                steps=steps,
                guidance=guidance,
                seed=seed,
                image=image,
                mask=mask,
            ),
            auth_token=auth_token,
        )
        # Blocks until the BFL task finishes (or times out) and returns the image.
        output_image = handle_bfl_synchronous_operation(operation)
        return (output_image,)
|
|
|
|
|
|
class FluxProCannyNode(ComfyNodeABC):
    """
    Generate image using a control image (canny).
    """

    @classmethod
    def INPUT_TYPES(s):
        # Comfy input schema: canny control image, prompt, edge-detection
        # thresholds (0-1 in the UI), sampling controls, and the hidden
        # auth token.
        return {
            "required": {
                "control_image": (IO.IMAGE,),
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the image generation",
                    },
                ),
                "prompt_upsampling": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                    },
                ),
                "canny_low_threshold": (
                    IO.FLOAT,
                    {
                        "default": 0.1,
                        "min": 0.01,
                        "max": 0.99,
                        "step": 0.01,
                        "tooltip": "Low threshold for Canny edge detection; ignored if skip_processing is True"
                    },
                ),
                "canny_high_threshold": (
                    IO.FLOAT,
                    {
                        "default": 0.4,
                        "min": 0.01,
                        "max": 0.99,
                        "step": 0.01,
                        "tooltip": "High threshold for Canny edge detection; ignored if skip_processing is True"
                    },
                ),
                "skip_preprocessing": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image.",
                    },
                ),
                "guidance": (
                    IO.FLOAT,
                    {
                        "default": 30,
                        "min": 1,
                        "max": 100,
                        "tooltip": "Guidance strength for the image generation process"
                    },
                ),
                "steps": (
                    IO.INT,
                    {
                        "default": 50,
                        "min": 15,
                        "max": 50,
                        "tooltip": "Number of steps for the image generation process"
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "The random seed used for creating the noise.",
                    },
                ),
            },
            "optional": {
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    RETURN_TYPES = (IO.IMAGE,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/image/BFL"

    def api_call(
        self,
        control_image: torch.Tensor,
        prompt: str,
        prompt_upsampling: bool,
        canny_low_threshold: float,
        canny_high_threshold: float,
        skip_preprocessing: bool,
        steps: int,
        guidance: float,
        seed=0,
        auth_token=None,
        **kwargs,
    ):
        # Drop any alpha channel before base64-encoding the control image.
        control_image = convert_image_to_base64(control_image[:,:,:,:3])
        preprocessed_image = None

        # scale canny threshold between 0-500, to match BFL's API
        def scale_value(value: float, min_val=0, max_val=500):
            return min_val + value * (max_val - min_val)
        canny_low_threshold = int(round(scale_value(canny_low_threshold)))
        canny_high_threshold = int(round(scale_value(canny_high_threshold)))


        # When preprocessing is skipped, the input is already canny-fied:
        # send it as `preprocessed_image` and omit the raw image/thresholds.
        if skip_preprocessing:
            preprocessed_image = control_image
            control_image = None
            canny_low_threshold = None
            canny_high_threshold = None

        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/bfl/flux-pro-1.0-canny/generate",
                method=HttpMethod.POST,
                request_model=BFLFluxCannyImageRequest,
                response_model=BFLFluxProGenerateResponse,
            ),
            request=BFLFluxCannyImageRequest(
                prompt=prompt,
                prompt_upsampling=prompt_upsampling,
                steps=steps,
                guidance=guidance,
                seed=seed,
                control_image=control_image,
                canny_low_threshold=canny_low_threshold,
                canny_high_threshold=canny_high_threshold,
                preprocessed_image=preprocessed_image,
            ),
            auth_token=auth_token,
        )
        # Blocks until the BFL task finishes (or times out) and returns the image.
        output_image = handle_bfl_synchronous_operation(operation)
        return (output_image,)
|
|
|
|
|
|
class FluxProDepthNode(ComfyNodeABC):
    """
    Generate image using a control image (depth).
    """

    @classmethod
    def INPUT_TYPES(s):
        # Comfy input schema: depth control image, prompt, sampling
        # controls, and the hidden auth token.
        return {
            "required": {
                "control_image": (IO.IMAGE,),
                "prompt": (
                    IO.STRING,
                    {
                        "multiline": True,
                        "default": "",
                        "tooltip": "Prompt for the image generation",
                    },
                ),
                "prompt_upsampling": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                    },
                ),
                "skip_preprocessing": (
                    IO.BOOLEAN,
                    {
                        "default": False,
                        "tooltip": "Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image.",
                    },
                ),
                "guidance": (
                    IO.FLOAT,
                    {
                        "default": 15,
                        "min": 1,
                        "max": 100,
                        "tooltip": "Guidance strength for the image generation process"
                    },
                ),
                "steps": (
                    IO.INT,
                    {
                        "default": 50,
                        "min": 15,
                        "max": 50,
                        "tooltip": "Number of steps for the image generation process"
                    },
                ),
                "seed": (
                    IO.INT,
                    {
                        "default": 0,
                        "min": 0,
                        "max": 0xFFFFFFFFFFFFFFFF,
                        "control_after_generate": True,
                        "tooltip": "The random seed used for creating the noise.",
                    },
                ),
            },
            "optional": {
            },
            "hidden": {
                "auth_token": "AUTH_TOKEN_COMFY_ORG",
            },
        }

    RETURN_TYPES = (IO.IMAGE,)
    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
    FUNCTION = "api_call"
    API_NODE = True
    CATEGORY = "api node/image/BFL"

    def api_call(
        self,
        control_image: torch.Tensor,
        prompt: str,
        prompt_upsampling: bool,
        skip_preprocessing: bool,
        steps: int,
        guidance: float,
        seed=0,
        auth_token=None,
        **kwargs,
    ):
        # Drop any alpha channel before base64-encoding the control image.
        control_image = convert_image_to_base64(control_image[:,:,:,:3])
        preprocessed_image = None

        # When preprocessing is skipped, the input is already a depth map:
        # send it as `preprocessed_image` and omit the raw image.
        if skip_preprocessing:
            preprocessed_image = control_image
            control_image = None

        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/bfl/flux-pro-1.0-depth/generate",
                method=HttpMethod.POST,
                request_model=BFLFluxDepthImageRequest,
                response_model=BFLFluxProGenerateResponse,
            ),
            request=BFLFluxDepthImageRequest(
                prompt=prompt,
                prompt_upsampling=prompt_upsampling,
                steps=steps,
                guidance=guidance,
                seed=seed,
                control_image=control_image,
                preprocessed_image=preprocessed_image,
            ),
            auth_token=auth_token,
        )
        # Blocks until the BFL task finishes (or times out) and returns the image.
        output_image = handle_bfl_synchronous_operation(operation)
        return (output_image,)
|
|
|
|
|
|
# A dictionary that contains all nodes you want to export with their names
# NOTE: names should be globally unique
NODE_CLASS_MAPPINGS = {
    "FluxProUltraImageNode": FluxProUltraImageNode,
    # FluxProImageNode is defined above but intentionally not exported.
    # "FluxProImageNode": FluxProImageNode,
    "FluxProExpandNode": FluxProExpandNode,
    "FluxProFillNode": FluxProFillNode,
    "FluxProCannyNode": FluxProCannyNode,
    "FluxProDepthNode": FluxProDepthNode,
}
|
|
|
|
# A dictionary that contains the friendly/humanly readable titles for the nodes
# Keys must match NODE_CLASS_MAPPINGS above.
NODE_DISPLAY_NAME_MAPPINGS = {
    "FluxProUltraImageNode": "Flux 1.1 [pro] Ultra Image",
    # "FluxProImageNode": "Flux 1.1 [pro] Image",
    "FluxProExpandNode": "Flux.1 Expand Image",
    "FluxProFillNode": "Flux.1 Fill Image",
    "FluxProCannyNode": "Flux.1 Canny Control Image",
    "FluxProDepthNode": "Flux.1 Depth Control Image",
}
|