Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-07-27 08:16:44 +00:00)

Commit e55b540899 (parent 918ca7f2ea): restore nodes order as it is in the V1 version for smaller git diff (2)
[One file's diff suppressed because it is too large]
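
Every hunk below shuffles whole class definitions so the V3 files match the class order of their V1 counterparts; apart from the NODES_LIST annotations and reflows, the code itself is unchanged. For orientation, every class involved follows the same V3 node pattern: a define_schema classmethod returning an io.Schema and an execute classmethod returning an io.NodeOutput. A minimal sketch assembled from the definitions visible in this diff (the node name, id, and category are illustrative, not part of the commit):

    from comfy_api.latest import io

    class ExampleNode(io.ComfyNode):  # hypothetical node, for illustration only
        @classmethod
        def define_schema(cls):
            return io.Schema(
                node_id="ExampleNode_V3",      # illustrative id
                category="advanced/example",   # illustrative category
                inputs=[
                    io.Conditioning.Input("conditioning"),
                ],
                outputs=[
                    io.Conditioning.Output(),
                ],
            )

        @classmethod
        def execute(cls, conditioning):
            # Real nodes transform their inputs; this sketch passes them through.
            return io.NodeOutput(conditioning)
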
@@ -49,28 +49,6 @@ class CLIPTextEncodeFlux(io.ComfyNode):
         return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"guidance": guidance}))
 
 
-class FluxDisableGuidance(io.ComfyNode):
-    @classmethod
-    def define_schema(cls):
-        return io.Schema(
-            node_id="FluxDisableGuidance_V3",
-            category="advanced/conditioning/flux",
-            description="This node completely disables the guidance embed on Flux and Flux like models",
-            inputs=[
-                io.Conditioning.Input("conditioning"),
-            ],
-            outputs=[
-                io.Conditioning.Output(),
-            ],
-        )
-
-    @classmethod
-    def execute(cls, conditioning):
-        c = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
-        return io.NodeOutput(c)
-
-
 class FluxGuidance(io.ComfyNode):
     @classmethod
     def define_schema(cls):
@@ -91,6 +69,25 @@ class FluxGuidance(io.ComfyNode):
         c = node_helpers.conditioning_set_values(conditioning, {"guidance": guidance})
         return io.NodeOutput(c)
 
 
+class FluxDisableGuidance(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="FluxDisableGuidance_V3",
+            category="advanced/conditioning/flux",
+            description="This node completely disables the guidance embed on Flux and Flux like models",
+            inputs=[
+                io.Conditioning.Input("conditioning"),
+            ],
+            outputs=[
+                io.Conditioning.Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, conditioning):
+        c = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
+        return io.NodeOutput(c)
+
+
 class FluxKontextImageScale(io.ComfyNode):
     @classmethod
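
Both FluxGuidance and the relocated FluxDisableGuidance delegate to node_helpers.conditioning_set_values, writing either a float or None under the "guidance" key. A simplified sketch of what that helper does (based on the upstream helper; the real one may carry extra options, so treat the exact signature as an assumption):

    # Simplified sketch of node_helpers.conditioning_set_values.
    # Conditioning is a list of [tensor, dict] pairs.
    def conditioning_set_values(conditioning, values={}):
        out = []
        for t in conditioning:
            d = t[1].copy()   # copy so other consumers keep the old dict
            d.update(values)  # e.g. {"guidance": 3.5} or {"guidance": None}
            out.append([t[0], d])
        return out
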
@@ -117,7 +114,7 @@ class FluxKontextImageScale(io.ComfyNode):
         return io.NodeOutput(image)
 
 
-NODES_LIST = [
+NODES_LIST: list[type[io.ComfyNode]] = [
     CLIPTextEncodeFlux,
     FluxDisableGuidance,
     FluxGuidance,
@@ -125,7 +125,7 @@ class FreeU_V2(io.ComfyNode):
         return io.NodeOutput(m)
 
 
-NODES_LIST = [
+NODES_LIST: list[type[io.ComfyNode]] = [
     FreeU,
     FreeU_V2,
 ]
@@ -6,33 +6,6 @@ import folder_paths
 from comfy_api.latest import io
 
 
-class CLIPTextEncodeHiDream(io.ComfyNode):
-    @classmethod
-    def define_schema(cls):
-        return io.Schema(
-            node_id="CLIPTextEncodeHiDream_V3",
-            category="advanced/conditioning",
-            inputs=[
-                io.Clip.Input("clip"),
-                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
-                io.String.Input("clip_g", multiline=True, dynamic_prompts=True),
-                io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
-                io.String.Input("llama", multiline=True, dynamic_prompts=True),
-            ],
-            outputs=[
-                io.Conditioning.Output(),
-            ]
-        )
-
-    @classmethod
-    def execute(cls, clip, clip_l, clip_g, t5xxl, llama):
-        tokens = clip.tokenize(clip_g)
-        tokens["l"] = clip.tokenize(clip_l)["l"]
-        tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
-        tokens["llama"] = clip.tokenize(llama)["llama"]
-        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))
-
-
 class QuadrupleCLIPLoader(io.ComfyNode):
     @classmethod
     def define_schema(cls):
@@ -65,7 +38,34 @@ class QuadrupleCLIPLoader(io.ComfyNode):
         )
 
 
-NODES_LIST = [
+class CLIPTextEncodeHiDream(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="CLIPTextEncodeHiDream_V3",
+            category="advanced/conditioning",
+            inputs=[
+                io.Clip.Input("clip"),
+                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
+                io.String.Input("clip_g", multiline=True, dynamic_prompts=True),
+                io.String.Input("t5xxl", multiline=True, dynamic_prompts=True),
+                io.String.Input("llama", multiline=True, dynamic_prompts=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(),
+            ]
+        )
+
+    @classmethod
+    def execute(cls, clip, clip_l, clip_g, t5xxl, llama):
+        tokens = clip.tokenize(clip_g)
+        tokens["l"] = clip.tokenize(clip_l)["l"]
+        tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]
+        tokens["llama"] = clip.tokenize(llama)["llama"]
+        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))
+
+
+NODES_LIST: list[type[io.ComfyNode]] = [
     CLIPTextEncodeHiDream,
     QuadrupleCLIPLoader,
 ]
@@ -7,16 +7,6 @@ import node_helpers
 import nodes
 from comfy_api.latest import io
 
-
-PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = (
-    "<|start_header_id|>system<|end_header_id|>\n\n<image>\nDescribe the video by detailing the following aspects according to the reference image: "
-    "1. The main content and theme of the video."
-    "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
-    "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
-    "4. background environment, light, style and atmosphere."
-    "5. camera angles, movements, and transitions used in the video:<|eot_id|>\n\n"
-    "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
-    "<|start_header_id|>assistant<|end_header_id|>\n\n"
-)
-
 
 class CLIPTextEncodeHunyuanDiT(io.ComfyNode):
     @classmethod
@@ -68,6 +58,51 @@ class EmptyHunyuanLatentVideo(io.ComfyNode):
         return io.NodeOutput({"samples":latent})
 
 
+PROMPT_TEMPLATE_ENCODE_VIDEO_I2V = (
+    "<|start_header_id|>system<|end_header_id|>\n\n<image>\nDescribe the video by detailing the following aspects according to the reference image: "
+    "1. The main content and theme of the video."
+    "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects."
+    "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects."
+    "4. background environment, light, style and atmosphere."
+    "5. camera angles, movements, and transitions used in the video:<|eot_id|>\n\n"
+    "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>"
+    "<|start_header_id|>assistant<|end_header_id|>\n\n"
+)
+
+
+class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="TextEncodeHunyuanVideo_ImageToVideo_V3",
+            category="advanced/conditioning",
+            inputs=[
+                io.Clip.Input("clip"),
+                io.ClipVisionOutput.Input("clip_vision_output"),
+                io.String.Input("prompt", multiline=True, dynamic_prompts=True),
+                io.Int.Input(
+                    "image_interleave",
+                    default=2,
+                    min=1,
+                    max=512,
+                    tooltip="How much the image influences things vs the text prompt. Higher number means more influence from the text prompt.",
+                ),
+            ],
+            outputs=[
+                io.Conditioning.Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, clip, clip_vision_output, prompt, image_interleave):
+        tokens = clip.tokenize(
+            prompt, llama_template=PROMPT_TEMPLATE_ENCODE_VIDEO_I2V,
+            image_embeds=clip_vision_output.mm_projected,
+            image_interleave=image_interleave,
+        )
+        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))
+
+
 class HunyuanImageToVideo(io.ComfyNode):
     @classmethod
     def define_schema(cls):
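
The relocated PROMPT_TEMPLATE_ENCODE_VIDEO_I2V is a llama-style chat template with a single "{}" slot in the user turn, which receives the prompt text (here it is passed to clip.tokenize as llama_template rather than formatted by hand). A tiny illustration of the substitution, with a made-up prompt:

    prompt = "a cat surfing a wave"  # illustrative prompt
    filled = PROMPT_TEMPLATE_ENCODE_VIDEO_I2V.format(prompt)
    # The system turn carries the <image> slot and the captioning instructions;
    # the user turn now contains the prompt text; the assistant turn is left open.
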
@@ -126,40 +161,7 @@ class HunyuanImageToVideo(io.ComfyNode):
         return io.NodeOutput(positive, out_latent)
 
 
-class TextEncodeHunyuanVideo_ImageToVideo(io.ComfyNode):
-    @classmethod
-    def define_schema(cls):
-        return io.Schema(
-            node_id="TextEncodeHunyuanVideo_ImageToVideo_V3",
-            category="advanced/conditioning",
-            inputs=[
-                io.Clip.Input("clip"),
-                io.ClipVisionOutput.Input("clip_vision_output"),
-                io.String.Input("prompt", multiline=True, dynamic_prompts=True),
-                io.Int.Input(
-                    "image_interleave",
-                    default=2,
-                    min=1,
-                    max=512,
-                    tooltip="How much the image influences things vs the text prompt. Higher number means more influence from the text prompt.",
-                ),
-            ],
-            outputs=[
-                io.Conditioning.Output(),
-            ],
-        )
-
-    @classmethod
-    def execute(cls, clip, clip_vision_output, prompt, image_interleave):
-        tokens = clip.tokenize(
-            prompt, llama_template=PROMPT_TEMPLATE_ENCODE_VIDEO_I2V,
-            image_embeds=clip_vision_output.mm_projected,
-            image_interleave=image_interleave,
-        )
-        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))
-
-
-NODES_LIST = [
+NODES_LIST: list[type[io.ComfyNode]] = [
     CLIPTextEncodeHunyuanDiT,
     EmptyHunyuanLatentVideo,
     HunyuanImageToVideo,
@@ -661,7 +661,7 @@ class VoxelToMeshBasic(io.ComfyNode):
         return io.NodeOutput(MESH(torch.stack(vertices), torch.stack(faces)))
 
 
-NODES_LIST = [
+NODES_LIST: list[type[io.ComfyNode]] = [
     EmptyLatentHunyuan3Dv2,
     Hunyuan3Dv2Conditioning,
     Hunyuan3Dv2ConditioningMultiView,
@@ -44,16 +44,15 @@ class LatentAdd(io.ComfyNode):
         return io.NodeOutput(samples_out)
 
 
-class LatentApplyOperation(io.ComfyNode):
+class LatentSubtract(io.ComfyNode):
     @classmethod
     def define_schema(cls):
         return io.Schema(
-            node_id="LatentApplyOperation_V3",
-            category="latent/advanced/operations",
-            is_experimental=True,
+            node_id="LatentSubtract_V3",
+            category="latent/advanced",
             inputs=[
-                io.Latent.Input("samples"),
-                io.LatentOperation.Input("operation"),
+                io.Latent.Input("samples1"),
+                io.Latent.Input("samples2"),
             ],
             outputs=[
                 io.Latent.Output(),
@@ -61,44 +60,78 @@ class LatentApplyOperation(io.ComfyNode):
         )
 
     @classmethod
-    def execute(cls, samples, operation):
-        samples_out = samples.copy()
+    def execute(cls, samples1, samples2):
+        samples_out = samples1.copy()
 
-        s1 = samples["samples"]
-        samples_out["samples"] = operation(latent=s1)
+        s1 = samples1["samples"]
+        s2 = samples2["samples"]
+
+        s2 = reshape_latent_to(s1.shape, s2)
+        samples_out["samples"] = s1 - s2
         return io.NodeOutput(samples_out)
 
 
-class LatentApplyOperationCFG(io.ComfyNode):
+class LatentMultiply(io.ComfyNode):
     @classmethod
     def define_schema(cls):
         return io.Schema(
-            node_id="LatentApplyOperationCFG_V3",
-            category="latent/advanced/operations",
-            is_experimental=True,
+            node_id="LatentMultiply_V3",
+            category="latent/advanced",
             inputs=[
-                io.Model.Input("model"),
-                io.LatentOperation.Input("operation"),
+                io.Latent.Input("samples"),
+                io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
             ],
             outputs=[
-                io.Model.Output(),
+                io.Latent.Output(),
             ],
         )
 
     @classmethod
-    def execute(cls, model, operation):
-        m = model.clone()
-
-        def pre_cfg_function(args):
-            conds_out = args["conds_out"]
-            if len(conds_out) == 2:
-                conds_out[0] = operation(latent=(conds_out[0] - conds_out[1])) + conds_out[1]
-            else:
-                conds_out[0] = operation(latent=conds_out[0])
-            return conds_out
-
-        m.set_model_sampler_pre_cfg_function(pre_cfg_function)
-        return io.NodeOutput(m)
+    def execute(cls, samples, multiplier):
+        samples_out = samples.copy()
+
+        s1 = samples["samples"]
+        samples_out["samples"] = s1 * multiplier
+        return io.NodeOutput(samples_out)
+
+
+class LatentInterpolate(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="LatentInterpolate_V3",
+            category="latent/advanced",
+            inputs=[
+                io.Latent.Input("samples1"),
+                io.Latent.Input("samples2"),
+                io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01),
+            ],
+            outputs=[
+                io.Latent.Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, samples1, samples2, ratio):
+        samples_out = samples1.copy()
+
+        s1 = samples1["samples"]
+        s2 = samples2["samples"]
+
+        s2 = reshape_latent_to(s1.shape, s2)
+
+        m1 = torch.linalg.vector_norm(s1, dim=(1))
+        m2 = torch.linalg.vector_norm(s2, dim=(1))
+
+        s1 = torch.nan_to_num(s1 / m1)
+        s2 = torch.nan_to_num(s2 / m2)
+
+        t = (s1 * ratio + s2 * (1.0 - ratio))
+        mt = torch.linalg.vector_norm(t, dim=(1))
+        st = torch.nan_to_num(t / mt)
+
+        samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio))
+        return io.NodeOutput(samples_out)
 
 
 class LatentBatch(io.ComfyNode):
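
The LatentInterpolate body restored above is a norm-preserving mix rather than a plain lerp. Writing $m_i = \lVert s_i \rVert$ for the vector norm over dim 1 and $r$ for ratio, the code computes

    \hat{s}_i = s_i / m_i, \qquad t = r\,\hat{s}_1 + (1 - r)\,\hat{s}_2, \qquad
    \mathrm{out} = \frac{t}{\lVert t \rVert}\,\bigl(r\,m_1 + (1 - r)\,m_2\bigr)

so direction and magnitude are interpolated separately; the torch.nan_to_num calls guard the divisions where a norm is zero.
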
@@ -159,54 +192,16 @@ class LatentBatchSeedBehavior(io.ComfyNode):
         return io.NodeOutput(samples_out)
 
 
-class LatentInterpolate(io.ComfyNode):
+class LatentApplyOperation(io.ComfyNode):
     @classmethod
     def define_schema(cls):
         return io.Schema(
-            node_id="LatentInterpolate_V3",
-            category="latent/advanced",
-            inputs=[
-                io.Latent.Input("samples1"),
-                io.Latent.Input("samples2"),
-                io.Float.Input("ratio", default=1.0, min=0.0, max=1.0, step=0.01),
-            ],
-            outputs=[
-                io.Latent.Output(),
-            ],
-        )
-
-    @classmethod
-    def execute(cls, samples1, samples2, ratio):
-        samples_out = samples1.copy()
-
-        s1 = samples1["samples"]
-        s2 = samples2["samples"]
-
-        s2 = reshape_latent_to(s1.shape, s2)
-
-        m1 = torch.linalg.vector_norm(s1, dim=(1))
-        m2 = torch.linalg.vector_norm(s2, dim=(1))
-
-        s1 = torch.nan_to_num(s1 / m1)
-        s2 = torch.nan_to_num(s2 / m2)
-
-        t = (s1 * ratio + s2 * (1.0 - ratio))
-        mt = torch.linalg.vector_norm(t, dim=(1))
-        st = torch.nan_to_num(t / mt)
-
-        samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio))
-        return io.NodeOutput(samples_out)
-
-
-class LatentMultiply(io.ComfyNode):
-    @classmethod
-    def define_schema(cls):
-        return io.Schema(
-            node_id="LatentMultiply_V3",
-            category="latent/advanced",
+            node_id="LatentApplyOperation_V3",
+            category="latent/advanced/operations",
+            is_experimental=True,
             inputs=[
                 io.Latent.Input("samples"),
-                io.Float.Input("multiplier", default=1.0, min=-10.0, max=10.0, step=0.01),
+                io.LatentOperation.Input("operation"),
             ],
             outputs=[
                 io.Latent.Output(),
@@ -214,14 +209,81 @@ class LatentMultiply(io.ComfyNode):
         )
 
     @classmethod
-    def execute(cls, samples, multiplier):
+    def execute(cls, samples, operation):
         samples_out = samples.copy()
 
         s1 = samples["samples"]
-        samples_out["samples"] = s1 * multiplier
+        samples_out["samples"] = operation(latent=s1)
         return io.NodeOutput(samples_out)
 
 
+class LatentApplyOperationCFG(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="LatentApplyOperationCFG_V3",
+            category="latent/advanced/operations",
+            is_experimental=True,
+            inputs=[
+                io.Model.Input("model"),
+                io.LatentOperation.Input("operation"),
+            ],
+            outputs=[
+                io.Model.Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, model, operation):
+        m = model.clone()
+
+        def pre_cfg_function(args):
+            conds_out = args["conds_out"]
+            if len(conds_out) == 2:
+                conds_out[0] = operation(latent=(conds_out[0] - conds_out[1])) + conds_out[1]
+            else:
+                conds_out[0] = operation(latent=conds_out[0])
+            return conds_out
+
+        m.set_model_sampler_pre_cfg_function(pre_cfg_function)
+        return io.NodeOutput(m)
+
+
+class LatentOperationTonemapReinhard(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="LatentOperationTonemapReinhard_V3",
+            category="latent/advanced/operations",
+            is_experimental=True,
+            inputs=[
+                io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
+            ],
+            outputs=[
+                io.LatentOperation.Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, multiplier):
+        def tonemap_reinhard(latent, **kwargs):
+            latent_vector_magnitude = (torch.linalg.vector_norm(latent, dim=(1)) + 0.0000000001)[:,None]
+            normalized_latent = latent / latent_vector_magnitude
+
+            mean = torch.mean(latent_vector_magnitude, dim=(1,2,3), keepdim=True)
+            std = torch.std(latent_vector_magnitude, dim=(1,2,3), keepdim=True)
+
+            top = (std * 5 + mean) * multiplier
+
+            #reinhard
+            latent_vector_magnitude *= (1.0 / top)
+            new_magnitude = latent_vector_magnitude / (latent_vector_magnitude + 1.0)
+            new_magnitude *= top
+
+            return normalized_latent * new_magnitude
+        return io.NodeOutput(tonemap_reinhard)
+
+
 class LatentOperationSharpen(io.ComfyNode):
     @classmethod
     def define_schema(cls):
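
The tonemap closure added above applies a Reinhard curve to the latent's per-position magnitude. With $m = \lVert z \rVert + \varepsilon$ (norm over dim 1), $\mu$ and $\sigma$ the mean and std of $m$, and $\mathrm{top} = (5\sigma + \mu)\cdot\mathrm{multiplier}$, it computes

    m' = \frac{m/\mathrm{top}}{m/\mathrm{top} + 1}\cdot\mathrm{top}, \qquad
    z' = \frac{z}{m}\cdot m'

so magnitudes well below top pass through nearly unchanged while larger ones are compressed toward top.
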
@@ -264,69 +326,7 @@ class LatentOperationSharpen(io.ComfyNode):
         return io.NodeOutput(sharpen)
 
 
-class LatentOperationTonemapReinhard(io.ComfyNode):
-    @classmethod
-    def define_schema(cls):
-        return io.Schema(
-            node_id="LatentOperationTonemapReinhard_V3",
-            category="latent/advanced/operations",
-            is_experimental=True,
-            inputs=[
-                io.Float.Input("multiplier", default=1.0, min=0.0, max=100.0, step=0.01),
-            ],
-            outputs=[
-                io.LatentOperation.Output(),
-            ],
-        )
-
-    @classmethod
-    def execute(cls, multiplier):
-        def tonemap_reinhard(latent, **kwargs):
-            latent_vector_magnitude = (torch.linalg.vector_norm(latent, dim=(1)) + 0.0000000001)[:,None]
-            normalized_latent = latent / latent_vector_magnitude
-
-            mean = torch.mean(latent_vector_magnitude, dim=(1,2,3), keepdim=True)
-            std = torch.std(latent_vector_magnitude, dim=(1,2,3), keepdim=True)
-
-            top = (std * 5 + mean) * multiplier
-
-            #reinhard
-            latent_vector_magnitude *= (1.0 / top)
-            new_magnitude = latent_vector_magnitude / (latent_vector_magnitude + 1.0)
-            new_magnitude *= top
-
-            return normalized_latent * new_magnitude
-        return io.NodeOutput(tonemap_reinhard)
-
-
-class LatentSubtract(io.ComfyNode):
-    @classmethod
-    def define_schema(cls):
-        return io.Schema(
-            node_id="LatentSubtract_V3",
-            category="latent/advanced",
-            inputs=[
-                io.Latent.Input("samples1"),
-                io.Latent.Input("samples2"),
-            ],
-            outputs=[
-                io.Latent.Output(),
-            ],
-        )
-
-    @classmethod
-    def execute(cls, samples1, samples2):
-        samples_out = samples1.copy()
-
-        s1 = samples1["samples"]
-        s2 = samples2["samples"]
-
-        s2 = reshape_latent_to(s1.shape, s2)
-        samples_out["samples"] = s1 - s2
-        return io.NodeOutput(samples_out)
-
-
-NODES_LIST = [
+NODES_LIST: list[type[io.ComfyNode]] = [
     LatentAdd,
     LatentApplyOperation,
     LatentApplyOperationCFG,
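
The operations pattern these hunks reorder splits the work in two: LatentOperation* nodes return a callable, and LatentApplyOperation invokes it as operation(latent=s1). A conceptual stand-alone sketch of that contract, outside the node graph (the example operation is made up):

    import torch

    # Any callable accepting latent=<tensor> (plus **kwargs) satisfies the
    # contract that LatentApplyOperation relies on above.
    def example_operation(latent, **kwargs):
        return latent * 0.5  # illustrative stand-in for tonemap/sharpen

    samples = {"samples": torch.randn(1, 4, 64, 64)}
    samples_out = samples.copy()
    samples_out["samples"] = example_operation(latent=samples["samples"])
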
@@ -172,7 +172,7 @@ class Preview3DAnimation(io.ComfyNode):
         return io.NodeOutput(ui=ui.PreviewUI3D(model_file, camera_info, cls=cls))
 
 
-NODES_LIST = [
+NODES_LIST: list[type[io.ComfyNode]] = [
     Load3D,
     Load3DAnimation,
     Preview3D,
@@ -516,7 +516,7 @@ class ModelSamplingLTXV(io.ComfyNode):
         return io.NodeOutput(m)
 
 
-NODES_LIST = [
+NODES_LIST: list[type[io.ComfyNode]] = [
     EmptyLTXVLatentVideo,
     LTXVAddGuide,
     LTXVConditioning,
@@ -59,4 +59,6 @@ class OptimalStepsScheduler(io.ComfyNode):
         return io.NodeOutput(torch.FloatTensor(sigmas))
 
 
-NODES_LIST = [OptimalStepsScheduler]
+NODES_LIST = [
+    OptimalStepsScheduler,
+]
@@ -57,4 +57,6 @@ class PerturbedAttentionGuidance(io.ComfyNode):
         return io.NodeOutput(m)
 
 
-NODES_LIST = [PerturbedAttentionGuidance]
+NODES_LIST = [
+    PerturbedAttentionGuidance,
+]
@@ -109,4 +109,6 @@ class PerpNegGuider(io.ComfyNode):
         return io.NodeOutput(guider)
 
 
-NODES_LIST = [PerpNegGuider]
+NODES_LIST = [
+    PerpNegGuider,
+]