# Mirror of https://github.com/comfyanonymous/ComfyUI.git
# Synced 2025-09-10 19:46:38 +00:00
import node_helpers
|
|
import comfy.utils
|
|
import math
|
|
|
|
|
|
class TextEncodeQwenImageEdit:
    """Text-encode node for Qwen image editing.

    Tokenizes *prompt* together with an optional reference *image* (scaled to
    roughly one megapixel) and, when a *vae* is supplied, attaches the encoded
    image as a reference latent on the resulting conditioning.
    """

    @classmethod
    def INPUT_TYPES(s):
        required = {
            "clip": ("CLIP", ),
            "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}),
        }
        optional = {
            "vae": ("VAE", ),
            "image": ("IMAGE", ),
        }
        return {"required": required, "optional": optional}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "advanced/conditioning"

    def encode(self, clip, prompt, vae=None, image=None):
        """Encode *prompt* (and optionally *image*) into conditioning.

        When *image* is given it is area-resampled so its pixel count is
        approximately 1024*1024 (aspect ratio preserved) before tokenizing.
        When *vae* is also given, the resized RGB image is VAE-encoded and
        appended to the conditioning under "reference_latents".
        Returns a 1-tuple holding the conditioning.
        """
        images = []
        ref_latent = None

        if image is not None:
            # NHWC -> NCHW for the upscale helper.
            pixels = image.movedim(-1, 1)
            target_area = 1024 * 1024
            # Uniform scale so width*height lands near the target pixel count.
            scale = math.sqrt(target_area / (pixels.shape[3] * pixels.shape[2]))
            new_width = round(pixels.shape[3] * scale)
            new_height = round(pixels.shape[2] * scale)
            resized = comfy.utils.common_upscale(pixels, new_width, new_height, "area", "disabled")
            image = resized.movedim(1, -1)
            # Drop any alpha channel; only RGB is tokenized / encoded.
            rgb = image[:, :, :, :3]
            images = [rgb]
            if vae is not None:
                ref_latent = vae.encode(rgb)

        tokens = clip.tokenize(prompt, images=images)
        conditioning = clip.encode_from_tokens_scheduled(tokens)

        if ref_latent is None:
            return (conditioning, )
        conditioning = node_helpers.conditioning_set_values(
            conditioning, {"reference_latents": [ref_latent]}, append=True)
        return (conditioning, )
|
|
|
|
|
|
# Registry read by the host application at import time: maps the node's
# string identifier to the class implementing it.
NODE_CLASS_MAPPINGS = {
    "TextEncodeQwenImageEdit": TextEncodeQwenImageEdit,
}
|