# Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-09-10 11:35:40 +00:00).
#
# Note on FluxKontextMultiReferenceLatentMethod: the "index" method is only useful
# if the kontext model is trained to properly use multiple reference images via
# indexing. The default "offset" method feeds the multiple images as if they were
# stitched together as one, which works with the current flux kontext model.
import node_helpers
import comfy.utils
class CLIPTextEncodeFlux:
    """Encode separate clip_l / t5xxl prompts into Flux conditioning.

    The two text inputs are tokenized independently, merged into a single
    token dict, and encoded with the requested guidance value attached.
    """

    @classmethod
    def INPUT_TYPES(s):
        prompt_opts = {"multiline": True, "dynamicPrompts": True}
        required = {
            "clip": ("CLIP", ),
            "clip_l": ("STRING", dict(prompt_opts)),
            "t5xxl": ("STRING", dict(prompt_opts)),
            "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
        }
        return {"required": required}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "advanced/conditioning/flux"

    def encode(self, clip, clip_l, t5xxl, guidance):
        # Start from the clip_l tokenization, then swap in the t5xxl tokens
        # produced from the second prompt so each encoder gets its own text.
        merged_tokens = clip.tokenize(clip_l)
        merged_tokens["t5xxl"] = clip.tokenize(t5xxl)["t5xxl"]

        cond = clip.encode_from_tokens_scheduled(merged_tokens, add_dict={"guidance": guidance})
        return (cond, )
class FluxGuidance:
    """Attach a guidance value to existing Flux conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "conditioning": ("CONDITIONING", ),
                "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
            }
        }

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "advanced/conditioning/flux"

    def append(self, conditioning, guidance):
        # Stamp the guidance value onto every conditioning entry.
        updated = node_helpers.conditioning_set_values(conditioning, {"guidance": guidance})
        return (updated, )
class FluxDisableGuidance:
    """Remove the guidance embed from Flux conditioning entirely."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "conditioning": ("CONDITIONING", ),
            }
        }

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"

    CATEGORY = "advanced/conditioning/flux"
    DESCRIPTION = "This node completely disables the guidance embed on Flux and Flux like models"

    def append(self, conditioning):
        # A guidance of None tells downstream code to skip the guidance embed.
        disabled = node_helpers.conditioning_set_values(conditioning, {"guidance": None})
        return (disabled, )
# Candidate (width, height) output resolutions for flux kontext, ordered from
# tallest to widest aspect ratio. Every pair has an area close to one megapixel
# (1024 * 1024 = 1048576); FluxKontextImageScale picks the entry whose aspect
# ratio is closest to the input image's.
# NOTE(review): name keeps the existing "PREFERED" spelling — renaming would
# break external code that imports this constant.
PREFERED_KONTEXT_RESOLUTIONS = [
    (672, 1568),
    (688, 1504),
    (720, 1456),
    (752, 1392),
    (800, 1328),
    (832, 1248),
    (880, 1184),
    (944, 1104),
    (1024, 1024),
    (1104, 944),
    (1184, 880),
    (1248, 832),
    (1328, 800),
    (1392, 752),
    (1456, 720),
    (1504, 688),
    (1568, 672),
]
class FluxKontextImageScale:
    """Rescale an image to the preferred kontext resolution with the nearest aspect ratio."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image": ("IMAGE", ),
            },
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "scale"

    CATEGORY = "advanced/conditioning/flux"
    DESCRIPTION = "This node resizes the image to one that is more optimal for flux kontext."

    def scale(self, image):
        # image is laid out (batch, height, width, channels) — dims 1 and 2
        # are indexed as height and width below.
        in_height = image.shape[1]
        in_width = image.shape[2]
        ratio = in_width / in_height

        # Pick the preferred resolution whose aspect ratio is closest to the
        # input's; ties resolve to the first (smallest-width) candidate.
        target_w, target_h = min(PREFERED_KONTEXT_RESOLUTIONS, key=lambda wh: abs(ratio - wh[0] / wh[1]))

        # common_upscale expects channels-first, so move channels before and
        # after the resize.
        resized = comfy.utils.common_upscale(image.movedim(-1, 1), target_w, target_h, "lanczos", "center")
        return (resized.movedim(1, -1), )
class FluxKontextMultiReferenceLatentMethod:
    """Select how multiple reference latents are positioned for flux kontext
    ("offset" stitches them as one image; "index" addresses them separately)."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "conditioning": ("CONDITIONING", ),
                "reference_latents_method": (("offset", "index"), ),
            }
        }

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "append"
    EXPERIMENTAL = True

    CATEGORY = "advanced/conditioning/flux"

    def append(self, conditioning, reference_latents_method):
        values = {"reference_latents_method": reference_latents_method}
        return (node_helpers.conditioning_set_values(conditioning, values), )
# Registry mapping node type identifiers (the names stored in workflow files)
# to their implementing classes; by ComfyUI convention this dict is read when
# the module is loaded to register the nodes — verify against the loader.
NODE_CLASS_MAPPINGS = {
    "CLIPTextEncodeFlux": CLIPTextEncodeFlux,
    "FluxGuidance": FluxGuidance,
    "FluxDisableGuidance": FluxDisableGuidance,
    "FluxKontextImageScale": FluxKontextImageScale,
    "FluxKontextMultiReferenceLatentMethod": FluxKontextMultiReferenceLatentMethod,
}