Mirror of https://github.com/comfyanonymous/ComfyUI.git
Disable pytorch attention in VAE for AMD.
@@ -297,7 +297,7 @@ def vae_attention():
     if model_management.xformers_enabled_vae():
         logging.info("Using xformers attention in VAE")
         return xformers_attention
-    elif model_management.pytorch_attention_enabled():
+    elif model_management.pytorch_attention_enabled_vae():
         logging.info("Using pytorch attention in VAE")
         return pytorch_attention
     else:
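For context, the call-site change means the VAE path no longer consults the global pytorch-attention flag directly; it goes through the new VAE-specific helper. A minimal, self-contained sketch of how the selection now behaves, using illustrative stand-ins for model_management state (the XFORMERS_VAE and IS_AMD stubs and the else-branch log message are assumptions for this sketch, not values taken from the diff):

import logging

logging.basicConfig(level=logging.INFO)

# Illustrative stand-ins for model_management state; not ComfyUI's real globals.
XFORMERS_VAE = False             # assume xformers is unavailable for the VAE
ENABLE_PYTORCH_ATTENTION = True  # the global flag is on...
IS_AMD = True                    # ...but we are on an AMD/ROCm build

def pytorch_attention_enabled_vae():
    # Mirrors the new helper: AMD is excluded even when the global flag is set.
    if IS_AMD:
        return False
    return ENABLE_PYTORCH_ATTENTION

def vae_attention():
    if XFORMERS_VAE:
        logging.info("Using xformers attention in VAE")
        return "xformers_attention"
    elif pytorch_attention_enabled_vae():
        logging.info("Using pytorch attention in VAE")
        return "pytorch_attention"
    else:
        # The else branch is not shown in the diff; a fallback is assumed here.
        logging.info("Using fallback attention in VAE")
        return "fallback_attention"

print(vae_attention())  # with the stubs above, the AMD case takes the fallback branch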
@@ -912,6 +912,11 @@ def pytorch_attention_enabled():
     global ENABLE_PYTORCH_ATTENTION
     return ENABLE_PYTORCH_ATTENTION
 
+def pytorch_attention_enabled_vae():
+    if is_amd():
+        return False # enabling pytorch attention on AMD currently causes crash when doing high res
+    return pytorch_attention_enabled()
+
 def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
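The new helper gates on is_amd(), which this diff does not define. A hedged sketch of one common way such a check can be written, assuming a ROCm build of PyTorch exposes torch.version.hip (ComfyUI's actual is_amd() may differ):

import torch

def is_amd():
    # On ROCm builds of PyTorch, torch.version.hip is a version string;
    # on CUDA or CPU-only builds it is None. This is an assumption for the
    # sketch, not ComfyUI's verified implementation.
    return torch.version.hip is not None

With a helper along these lines, pytorch_attention_enabled_vae() returns False on ROCm hardware regardless of ENABLE_PYTORCH_ATTENTION, which matches the workaround the in-code comment describes for the high-resolution crash on AMD.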