Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-07-31 02:06:45 +00:00)

Commit 443a795850: Merge branch 'master' into worksplit-multigpu
@@ -273,6 +273,8 @@ You can install ComfyUI in Apple Mac silicon (M1 or M2) with any recent macOS ve
 #### DirectML (AMD Cards on Windows)
 
+This is very badly supported and is not recommended. There are some unofficial builds of pytorch ROCm on windows that exist that will give you a much better experience than this. This readme will be updated once official pytorch ROCm builds for windows come out.
+
 ```pip install torch-directml``` Then you can launch ComfyUI with: ```python main.py --directml```
 
 #### Ascend NPUs
@@ -123,6 +123,8 @@ class ControlNetFlux(Flux):
 
         if y is None:
             y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype)
+        else:
+            y = y[:, :self.params.vec_in_dim]
 
         # running on sequences img
         img = self.img_in(img)
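For context, a minimal sketch of what the new `else` branch guards against: a conditioning vector `y` that is wider than the model expects. The value of `vec_in_dim` and the shapes here are assumptions for illustration, not taken from the Flux configuration.

```python
import torch

vec_in_dim = 768                 # assumed expected conditioning width
img = torch.randn(2, 4096, 64)   # assumed (batch, tokens, channels)

def prepare_y(y, img, vec_in_dim):
    if y is None:
        # No conditioning supplied: fall back to zeros of the expected width.
        return torch.zeros((img.shape[0], vec_in_dim), device=img.device, dtype=img.dtype)
    # Oversized conditioning: keep only the first vec_in_dim channels.
    return y[:, :vec_in_dim]

print(prepare_y(None, img, vec_in_dim).shape)                  # torch.Size([2, 768])
print(prepare_y(torch.randn(2, 1024), img, vec_in_dim).shape)  # torch.Size([2, 768])
```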
@@ -118,7 +118,7 @@ class Modulation(nn.Module):
 def apply_mod(tensor, m_mult, m_add=None, modulation_dims=None):
     if modulation_dims is None:
         if m_add is not None:
-            return tensor * m_mult + m_add
+            return torch.addcmul(m_add, tensor, m_mult)
         else:
             return tensor * m_mult
     else:
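The rewrite relies on the identity `torch.addcmul(a, t, m) == t * m + a`; `addcmul` performs the multiply-add in one call and avoids materializing the intermediate `t * m` tensor. A quick sanity check:

```python
import torch

t = torch.randn(4, 8)
m = torch.randn(4, 8)
a = torch.randn(4, 8)

# addcmul(input, tensor1, tensor2) computes input + tensor1 * tensor2,
# so it matches the old expression exactly.
assert torch.allclose(torch.addcmul(a, t, m), t * m + a)
```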
@@ -31,7 +31,7 @@ def dynamic_slice(
     starts: List[int],
     sizes: List[int],
 ) -> Tensor:
-    slicing = [slice(start, start + size) for start, size in zip(starts, sizes)]
+    slicing = tuple(slice(start, start + size) for start, size in zip(starts, sizes))
     return x[slicing]
 
 class AttnChunk(NamedTuple):
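The list-to-tuple change matters because indexing a tensor with a tuple of slices slices each dimension directly, while indexing with a list of slices is deprecated, ambiguous advanced indexing in recent NumPy and PyTorch. A small demonstration:

```python
import torch

x = torch.arange(24).reshape(4, 6)
starts, sizes = [1, 2], [2, 3]

# A tuple of slice objects indexes dimension-by-dimension.
slicing = tuple(slice(start, start + size) for start, size in zip(starts, sizes))
print(x[slicing])  # rows 1..2, columns 2..4 -> shape (2, 3)
```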
@@ -1024,6 +1024,8 @@ class CosmosPredict2(BaseModel):
     def process_timestep(self, timestep, x, denoise_mask=None, **kwargs):
         if denoise_mask is None:
             return timestep
+        if denoise_mask.ndim <= 4:
+            return timestep
         condition_video_mask_B_1_T_1_1 = denoise_mask.mean(dim=[1, 3, 4], keepdim=True)
         c_noise_B_1_T_1_1 = 0.0 * (1.0 - condition_video_mask_B_1_T_1_1) + timestep.reshape(timestep.shape[0], 1, 1, 1, 1) * condition_video_mask_B_1_T_1_1
         out = c_noise_B_1_T_1_1.squeeze(dim=[1, 3, 4])
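To see what the masked timestep computation produces, here is a toy run with assumed shapes (B=1, C=16, T=4, H=W=8); per frame, the timestep survives only where the denoise mask is active. Multi-dim `squeeze` needs a recent PyTorch (2.x).

```python
import torch

denoise_mask = torch.zeros(1, 16, 4, 8, 8)   # assumed (B, C, T, H, W)
denoise_mask[:, :, 2:] = 1.0                 # denoise only the last two frames
timestep = torch.tensor([0.7])

mask_B_1_T_1_1 = denoise_mask.mean(dim=[1, 3, 4], keepdim=True)
c_noise = 0.0 * (1.0 - mask_B_1_T_1_1) + timestep.reshape(-1, 1, 1, 1, 1) * mask_B_1_T_1_1
print(c_noise.squeeze(dim=[1, 3, 4]))        # tensor([[0.0000, 0.0000, 0.7000, 0.7000]])
```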
@@ -11,6 +11,43 @@ from comfy_config.types import (
     PyProjectSettings
 )
 
+
+def validate_and_extract_os_classifiers(classifiers: list) -> list:
+    os_classifiers = [c for c in classifiers if c.startswith("Operating System :: ")]
+    if not os_classifiers:
+        return []
+
+    os_values = [c[len("Operating System :: ") :] for c in os_classifiers]
+    valid_os_prefixes = {"Microsoft", "POSIX", "MacOS", "OS Independent"}
+
+    for os_value in os_values:
+        if not any(os_value.startswith(prefix) for prefix in valid_os_prefixes):
+            return []
+
+    return os_values
+
+
+def validate_and_extract_accelerator_classifiers(classifiers: list) -> list:
+    accelerator_classifiers = [c for c in classifiers if c.startswith("Environment ::")]
+    if not accelerator_classifiers:
+        return []
+
+    accelerator_values = [c[len("Environment :: ") :] for c in accelerator_classifiers]
+
+    valid_accelerators = {
+        "GPU :: NVIDIA CUDA",
+        "GPU :: AMD ROCm",
+        "GPU :: Intel Arc",
+        "NPU :: Huawei Ascend",
+        "GPU :: Apple Metal",
+    }
+
+    for accelerator_value in accelerator_values:
+        if accelerator_value not in valid_accelerators:
+            return []
+
+    return accelerator_values
+
+
 """
 Extract configuration from a custom node directory's pyproject.toml file or a Python file.
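Assuming the two validators above are in scope, this is how they behave on a typical classifier list (values illustrative); note that a single unrecognized value invalidates the whole group:

```python
classifiers = [
    "Operating System :: OS Independent",
    "Environment :: GPU :: NVIDIA CUDA",
    "Environment :: GPU :: AMD ROCm",
]

print(validate_and_extract_os_classifiers(classifiers))
# ['OS Independent']
print(validate_and_extract_accelerator_classifiers(classifiers))
# ['GPU :: NVIDIA CUDA', 'GPU :: AMD ROCm']

# One unknown value and the function falls back to an empty list:
print(validate_and_extract_accelerator_classifiers(["Environment :: GPU :: Unknown"]))
# []
```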
@@ -78,6 +115,24 @@ def extract_node_configuration(path) -> Optional[PyProjectConfig]:
     tool_data = raw_settings.tool
     comfy_data = tool_data.get("comfy", {}) if tool_data else {}
 
+    dependencies = project_data.get("dependencies", [])
+    supported_comfyui_frontend_version = ""
+    for dep in dependencies:
+        if isinstance(dep, str) and dep.startswith("comfyui-frontend-package"):
+            supported_comfyui_frontend_version = dep.removeprefix("comfyui-frontend-package")
+            break
+
+    supported_comfyui_version = comfy_data.get("requires-comfyui", "")
+
+    classifiers = project_data.get('classifiers', [])
+    supported_os = validate_and_extract_os_classifiers(classifiers)
+    supported_accelerators = validate_and_extract_accelerator_classifiers(classifiers)
+
+    project_data['supported_os'] = supported_os
+    project_data['supported_accelerators'] = supported_accelerators
+    project_data['supported_comfyui_frontend_version'] = supported_comfyui_frontend_version
+    project_data['supported_comfyui_version'] = supported_comfyui_version
+
     return PyProjectConfig(project=project_data, tool_comfy=comfy_data)
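The frontend pin is recovered with `str.removeprefix` (Python 3.9+), which leaves just the version specifier behind. For example:

```python
dep = "comfyui-frontend-package==1.22.2"

# Everything after the package name is the PEP 440 specifier.
print(dep.removeprefix("comfyui-frontend-package"))  # '==1.22.2'
```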
@@ -51,7 +51,7 @@ class ComfyConfig(BaseModel):
     models: List[Model] = Field(default_factory=list, alias="Models")
     includes: List[str] = Field(default_factory=list)
     web: Optional[str] = None
+    banner_url: str = ""
 
 class License(BaseModel):
     file: str = ""
@@ -66,6 +66,10 @@ class ProjectConfig(BaseModel):
     dependencies: List[str] = Field(default_factory=list)
     license: License = Field(default_factory=License)
     urls: URLs = Field(default_factory=URLs)
+    supported_os: List[str] = Field(default_factory=list)
+    supported_accelerators: List[str] = Field(default_factory=list)
+    supported_comfyui_version: str = ""
+    supported_comfyui_frontend_version: str = ""
 
     @field_validator('license', mode='before')
     @classmethod
@@ -304,10 +304,23 @@ Optional spacing can be added between images.
                 image2.movedim(-1, 1), target_w, target_h, "lanczos", "disabled"
             ).movedim(1, -1)
 
+        color_map = {
+            "white": 1.0,
+            "black": 0.0,
+            "red": (1.0, 0.0, 0.0),
+            "green": (0.0, 1.0, 0.0),
+            "blue": (0.0, 0.0, 1.0),
+        }
+
+        color_val = color_map[spacing_color]
+
         # When not matching sizes, pad to align non-concat dimensions
         if not match_image_size:
             h1, w1 = image1.shape[1:3]
             h2, w2 = image2.shape[1:3]
+            pad_value = 0.0
+            if not isinstance(color_val, tuple):
+                pad_value = color_val
 
             if direction in ["left", "right"]:
                 # For horizontal concat, pad heights to match
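Hoisting `color_map` to the top of the method matters because the padding fill can only be a scalar: `torch.nn.functional.pad` accepts a single `value`, so the RGB tuples ("red", "green", "blue") keep the 0.0 default and only "white"/"black" actually change the padding. A small sketch of that fallback:

```python
import torch

color_map = {"white": 1.0, "black": 0.0, "red": (1.0, 0.0, 0.0)}
color_val = color_map["red"]

# F.pad takes one scalar fill value, so tuple colors fall back to 0.0.
pad_value = color_val if not isinstance(color_val, tuple) else 0.0

x = torch.ones(1, 2, 2, 3)  # (batch, H, W, C)
padded = torch.nn.functional.pad(x, (0, 0, 0, 0, 1, 1), mode='constant', value=pad_value)
print(padded.shape)  # torch.Size([1, 4, 2, 3])
```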
@@ -316,11 +329,11 @@ Optional spacing can be added between images.
                     if h1 < target_h:
                         pad_h = target_h - h1
                         pad_top, pad_bottom = pad_h // 2, pad_h - pad_h // 2
-                        image1 = torch.nn.functional.pad(image1, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=0.0)
+                        image1 = torch.nn.functional.pad(image1, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=pad_value)
                     if h2 < target_h:
                         pad_h = target_h - h2
                         pad_top, pad_bottom = pad_h // 2, pad_h - pad_h // 2
-                        image2 = torch.nn.functional.pad(image2, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=0.0)
+                        image2 = torch.nn.functional.pad(image2, (0, 0, 0, 0, pad_top, pad_bottom), mode='constant', value=pad_value)
             else: # up, down
                 # For vertical concat, pad widths to match
                 if w1 != w2:
@@ -328,11 +341,11 @@ Optional spacing can be added between images.
                     if w1 < target_w:
                         pad_w = target_w - w1
                         pad_left, pad_right = pad_w // 2, pad_w - pad_w // 2
-                        image1 = torch.nn.functional.pad(image1, (0, 0, pad_left, pad_right), mode='constant', value=0.0)
+                        image1 = torch.nn.functional.pad(image1, (0, 0, pad_left, pad_right), mode='constant', value=pad_value)
                     if w2 < target_w:
                         pad_w = target_w - w2
                         pad_left, pad_right = pad_w // 2, pad_w - pad_w // 2
-                        image2 = torch.nn.functional.pad(image2, (0, 0, pad_left, pad_right), mode='constant', value=0.0)
+                        image2 = torch.nn.functional.pad(image2, (0, 0, pad_left, pad_right), mode='constant', value=pad_value)
 
             # Ensure same number of channels
             if image1.shape[-1] != image2.shape[-1]:
@@ -366,15 +379,6 @@ Optional spacing can be added between images.
         if spacing_width > 0:
             spacing_width = spacing_width + (spacing_width % 2) # Ensure even
 
-            color_map = {
-                "white": 1.0,
-                "black": 0.0,
-                "red": (1.0, 0.0, 0.0),
-                "green": (0.0, 1.0, 0.0),
-                "blue": (0.0, 0.0, 1.0),
-            }
-            color_val = color_map[spacing_color]
-
         if direction in ["left", "right"]:
             spacing_shape = (
                 image1.shape[0],
@@ -410,6 +414,62 @@ Optional spacing can be added between images.
         concat_dim = 2 if direction in ["left", "right"] else 1
         return (torch.cat(images, dim=concat_dim),)
 
+
+class ResizeAndPadImage:
+    @classmethod
+    def INPUT_TYPES(cls):
+        return {
+            "required": {
+                "image": ("IMAGE",),
+                "target_width": ("INT", {
+                    "default": 512,
+                    "min": 1,
+                    "max": MAX_RESOLUTION,
+                    "step": 1
+                }),
+                "target_height": ("INT", {
+                    "default": 512,
+                    "min": 1,
+                    "max": MAX_RESOLUTION,
+                    "step": 1
+                }),
+                "padding_color": (["white", "black"],),
+                "interpolation": (["area", "bicubic", "nearest-exact", "bilinear", "lanczos"],),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "resize_and_pad"
+    CATEGORY = "image/transform"
+
+    def resize_and_pad(self, image, target_width, target_height, padding_color, interpolation):
+        batch_size, orig_height, orig_width, channels = image.shape
+
+        scale_w = target_width / orig_width
+        scale_h = target_height / orig_height
+        scale = min(scale_w, scale_h)
+
+        new_width = int(orig_width * scale)
+        new_height = int(orig_height * scale)
+
+        image_permuted = image.permute(0, 3, 1, 2)
+
+        resized = comfy.utils.common_upscale(image_permuted, new_width, new_height, interpolation, "disabled")
+
+        pad_value = 0.0 if padding_color == "black" else 1.0
+        padded = torch.full(
+            (batch_size, channels, target_height, target_width),
+            pad_value,
+            dtype=image.dtype,
+            device=image.device
+        )
+
+        y_offset = (target_height - new_height) // 2
+        x_offset = (target_width - new_width) // 2
+
+        padded[:, :, y_offset:y_offset + new_height, x_offset:x_offset + new_width] = resized
+
+        output = padded.permute(0, 2, 3, 1)
+        return (output,)
+
+
 class SaveSVGNode:
     """
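The geometry in `resize_and_pad` is a standard letterbox: scale by the limiting dimension, then center with integer offsets. Worked through with illustrative numbers:

```python
# Fit a 1920x1080 image into a 512x512 canvas (numbers illustrative):
orig_w, orig_h = 1920, 1080
target_w, target_h = 512, 512

scale = min(target_w / orig_w, target_h / orig_h)                # ~0.2667
new_w, new_h = int(orig_w * scale), int(orig_h * scale)          # 512 x 288
x_off, y_off = (target_w - new_w) // 2, (target_h - new_h) // 2  # 0, 112

print(new_w, new_h, x_off, y_off)  # 512 288 0 112
```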
@@ -532,5 +592,6 @@ NODE_CLASS_MAPPINGS = {
     "SaveAnimatedPNG": SaveAnimatedPNG,
     "SaveSVGNode": SaveSVGNode,
     "ImageStitch": ImageStitch,
+    "ResizeAndPadImage": ResizeAndPadImage,
     "GetImageSize": GetImageSize,
 }
execution.py (11 changed lines)

@@ -429,17 +429,20 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, promp
 
         logging.error(f"!!! Exception during processing !!! {ex}")
         logging.error(traceback.format_exc())
+        tips = ""
+
+        if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
+            tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number."
+            logging.error("Got an OOM, unloading all loaded models.")
+            comfy.model_management.unload_all_models()
 
         error_details = {
             "node_id": real_node_id,
-            "exception_message": str(ex),
+            "exception_message": "{}\n{}".format(ex, tips),
            "exception_type": exception_type,
             "traceback": traceback.format_tb(tb),
             "current_inputs": input_data_formatted
         }
-        if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
-            logging.error("Got an OOM, unloading all loaded models.")
-            comfy.model_management.unload_all_models()
 
         return (ExecutionResult.FAILURE, error_details, ex)
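The practical effect is that the OOM hint now travels inside `exception_message` (and so reaches the frontend error dialog) instead of only being logged. A minimal sketch of the string that lands in `error_details`, with an illustrative exception:

```python
ex = RuntimeError("CUDA out of memory. Tried to allocate 2.00 GiB")  # illustrative
tips = "This error means you ran out of memory on your GPU."

# New format: original message, then the hint appended below it.
print("{}\n{}".format(ex, tips))
```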
main.py (8 changed lines)

@@ -185,7 +185,13 @@ def prompt_worker(q, server_instance):
 
             current_time = time.perf_counter()
             execution_time = current_time - execution_start_time
-            logging.info("Prompt executed in {:.2f} seconds".format(execution_time))
+
+            # Log Time in a more readable way after 10 minutes
+            if execution_time > 600:
+                execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time))
+                logging.info(f"Prompt executed in {execution_time}")
+            else:
+                logging.info("Prompt executed in {:.2f} seconds".format(execution_time))
 
         flags = q.get_flags()
         free_memory = flags.get("free_memory", False)
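`time.gmtime` plus `strftime` converts a seconds float to H:M:S with no extra dependencies; note it wraps at 24 hours, which is fine for the intended range. For example:

```python
import time

execution_time = 725.3  # seconds; over the 600-second threshold
print(time.strftime("%H:%M:%S", time.gmtime(execution_time)))  # 00:12:05
```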
@@ -1,5 +1,5 @@
-comfyui-frontend-package==1.21.7
-comfyui-workflow-templates==0.1.28
+comfyui-frontend-package==1.22.2
+comfyui-workflow-templates==0.1.29
 comfyui-embedded-docs==0.2.2
 torch
 torchsde