Add logs to verify that optimized_attention_override is passed all the way into the attention function

This commit is contained in:
Jedrzej Kosinski
2025-08-28 19:43:39 -07:00
parent a7d70e42a0
commit 48ed71caf8
2 changed files with 14 additions and 2 deletions

View File

@@ -226,7 +226,14 @@ def wrap_attn(func):
# move up the stack
frame = frame.f_back
LOG_CONTENTS["|".join(logged_stack)] = (logged_stack_to_index, logged_stack)
# check if we get what we want from transformer_options
t_check = "❌❌❌"
transformer_options = kwargs.get("transformer_options", None)
if transformer_options is not None:
if "optimized_attention_override" in transformer_options:
t_check = "✅✅✅"
LOG_CONTENTS["|".join(logged_stack)] = (t_check, logged_stack_to_index, logged_stack)
finally:
# Important: break ref cycles so tensors aren't pinned

View File

@@ -1034,8 +1034,13 @@ class CFGGuider:
self,
comfy.patcher_extension.get_all_wrappers(comfy.patcher_extension.WrappersMP.OUTER_SAMPLE, self.model_options, is_model_options=True)
)
# comfy.ldm.modules.attention.LOG_ATTN_CALLS = True #TODO: Remove this $$$$$
comfy.ldm.modules.attention.LOG_ATTN_CALLS = True #TODO: Remove this $$$$$
comfy.ldm.modules.attention.LOG_CONTENTS = {}
if "optimized_attention_override" not in self.model_options["transformer_options"]:
def optimized_attention_override(func, *args, **kwargs):
return func(*args, **kwargs)
self.model_options["transformer_options"]["optimized_attention_override"] = optimized_attention_override
output = executor.execute(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
finally:
cast_to_load_options(self.model_options, device=self.model_patcher.offload_device)