Add attention mask support to sub quad attention.

comfyanonymous
2024-01-07 04:13:58 -05:00
parent 0c2c9fbdfa
commit aaa9017302
2 changed files with 27 additions and 4 deletions


@@ -177,6 +177,7 @@ def attention_sub_quad(query, key, value, heads, mask=None):
         kv_chunk_size_min=kv_chunk_size_min,
         use_checkpoint=False,
         upcast_attention=upcast_attention,
+        mask=mask,
     )
     hidden_states = hidden_states.to(dtype)
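
The diff above only forwards the mask into the sub-quadratic attention kernel; the actual masking happens inside the chunked computation. As a rough illustration of the general idea (not the ComfyUI implementation), the sketch below applies an additive mask per chunk before the softmax. For simplicity it chunks over the query dimension only and assumes a hypothetical mask of shape (batch, q_tokens, kv_tokens); ComfyUI's real kernel also chunks over keys/values and tracks running softmax statistics.

```python
import torch

def attention_query_chunked(query, key, value, mask=None, q_chunk_size=1024):
    # query: (batch, q_tokens, dim); key/value: (batch, kv_tokens, dim)
    # mask: optional additive bias of shape (batch, q_tokens, kv_tokens)
    scale = query.shape[-1] ** -0.5
    out_chunks = []
    for start in range(0, query.shape[1], q_chunk_size):
        end = min(start + q_chunk_size, query.shape[1])
        # Attention scores for this query chunk against all keys.
        scores = torch.einsum("bqd,bkd->bqk", query[:, start:end], key) * scale
        if mask is not None:
            # Slice the mask along the query dimension to match the chunk,
            # then add it before the softmax (masked positions carry -inf
            # or a large negative bias).
            scores = scores + mask[:, start:end]
        probs = torch.softmax(scores, dim=-1)
        out_chunks.append(torch.einsum("bqk,bkd->bqd", probs, value))
    return torch.cat(out_chunks, dim=1)
```

Because the softmax is taken over the key dimension, chunking over queries needs no renormalization across chunks; the key/value chunking used by the sub-quadratic kernel additionally requires accumulating partial softmax sums, but the mask is sliced and added to the chunk scores in the same way.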