Add noise augmentation to hunyuan image refiner. (#9831)
This was missing and should help with colors being blown out.
@@ -1437,6 +1437,7 @@ class HunyuanImage21Refiner(HunyuanImage21):
     def concat_cond(self, **kwargs):
         noise = kwargs.get("noise", None)
         image = kwargs.get("concat_latent_image", None)
+        noise_augmentation = kwargs.get("noise_augmentation", 0.0)
         device = kwargs["device"]
 
         if image is None:
@@ -1446,6 +1447,9 @@ class HunyuanImage21Refiner(HunyuanImage21):
         image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center")
         image = self.process_latent_in(image)
         image = utils.resize_to_batch_size(image, noise.shape[0])
+        if noise_augmentation > 0:
+            noise = torch.randn(image.shape, generator=torch.manual_seed(kwargs.get("seed", 0) - 10), dtype=image.dtype, device="cpu").to(image.device)
+            image = noise_augmentation * noise + (1.0 - noise_augmentation) * image
         return image
 
     def extra_conds(self, **kwargs):
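For clarity, here is the noise-augmentation blend from the refiner hunks above as a standalone sketch. The helper name augment_latent, the example tensor shape, and the seed value are illustrative assumptions; only the blend formula and the seeded CPU torch.randn call mirror the diff.

import torch

def augment_latent(image, noise_augmentation, seed=0):
    # Blend fresh Gaussian noise into the conditioning latent, as concat_cond
    # now does. With noise_augmentation == 0 the latent passes through unchanged.
    if noise_augmentation <= 0:
        return image
    noise = torch.randn(image.shape, generator=torch.manual_seed(seed - 10),
                        dtype=image.dtype, device="cpu").to(image.device)
    return noise_augmentation * noise + (1.0 - noise_augmentation) * image

# Hypothetical latent shape (batch, channels, frames, height, width), for illustration only.
latent = torch.randn(1, 64, 1, 32, 32)
conditioned = augment_latent(latent, noise_augmentation=0.10, seed=42)

With the node default of 0.10 the conditioning latent keeps 90% of the original signal and mixes in 10% unit-variance noise, which is what the commit message expects to help with the blown-out colors.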
@@ -134,6 +134,7 @@ class HunyuanRefinerLatent:
         return {"required": {"positive": ("CONDITIONING", ),
                              "negative": ("CONDITIONING", ),
                              "latent": ("LATENT", ),
+                             "noise_augmentation": ("FLOAT", {"default": 0.10, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
 
     RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
@@ -141,11 +142,10 @@ class HunyuanRefinerLatent:
 
     FUNCTION = "execute"
 
-    def execute(self, positive, negative, latent):
+    def execute(self, positive, negative, latent, noise_augmentation):
         latent = latent["samples"]
-        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": latent})
-        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": latent})
-
+        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": latent, "noise_augmentation": noise_augmentation})
+        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": latent, "noise_augmentation": noise_augmentation})
         out_latent = {}
         out_latent["samples"] = torch.zeros([latent.shape[0], 32, latent.shape[-3], latent.shape[-2], latent.shape[-1]], device=comfy.model_management.intermediate_device())
         return (positive, negative, out_latent)
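The node-side hunks above add the noise_augmentation input to HunyuanRefinerLatent and attach it to both conditionings. As a rough sketch of how that value reaches concat_cond: ComfyUI conditioning is a list of [embedding, options] pairs, conditioning_set_values merges extra keys into each options dict, and the refiner model later reads them back as kwargs. The stand-in below is a simplification for illustration, not the real node_helpers implementation.

# Simplified stand-in for node_helpers.conditioning_set_values (illustration only).
def conditioning_set_values_sketch(conditioning, values):
    # Each conditioning entry is an [embedding, options] pair; copy it and
    # merge the new keys into the options dict.
    return [[emb, {**opts, **values}] for emb, opts in conditioning]

positive = [[None, {}]]  # placeholder embedding; a real entry holds a tensor
positive = conditioning_set_values_sketch(
    positive, {"concat_latent_image": None, "noise_augmentation": 0.10})
print(positive[0][1]["noise_augmentation"])  # -> 0.1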