Add pytorch attention support to VAE.

comfyanonymous
2023-03-13 12:25:19 -04:00
parent a256a2abde
commit 83f23f82b8
4 changed files with 83 additions and 13 deletions
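The "pytorch attention" in question is torch.nn.functional.scaled_dot_product_attention, introduced in PyTorch 2.0. As a rough illustration of what the VAE gains, here is a minimal single-head sketch of running a VAE feature map through that fused kernel; the function name, tensor names, and reshape layout are assumptions for illustration, not the exact code from this commit.

```python
# Minimal sketch (assumed shapes/names): single-head attention over a VAE
# feature map using PyTorch's fused scaled_dot_product_attention.
import torch
import torch.nn.functional as F

def vae_attention(q, k, v):
    # q, k, v: (B, C, H, W) projections of the VAE feature map
    B, C, H, W = q.shape
    # flatten spatial dims -> (B, 1, H*W, C), i.e. one attention head
    q, k, v = [t.reshape(B, 1, C, H * W).transpose(2, 3) for t in (q, k, v)]
    out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
    # restore (B, C, H, W)
    return out.transpose(2, 3).reshape(B, C, H, W)
```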

comfy/model_management.py

@@ -41,6 +41,14 @@ else:
     except:
         XFORMERS_IS_AVAILBLE = False

+ENABLE_PYTORCH_ATTENTION = False
+if "--use-pytorch-cross-attention" in sys.argv:
+    torch.backends.cuda.enable_math_sdp(True)
+    torch.backends.cuda.enable_flash_sdp(True)
+    torch.backends.cuda.enable_mem_efficient_sdp(True)
+    ENABLE_PYTORCH_ATTENTION = True
+    XFORMERS_IS_AVAILBLE = False
+
 if "--cpu" in sys.argv:
     vram_state = CPU

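The three enable_*_sdp(True) calls switch on every scaled-dot-product-attention backend (the fused flash and memory-efficient kernels plus the plain math fallback); PyTorch then dispatches to the fastest enabled kernel that supports the given inputs. A small sketch for inspecting those toggles, assuming the query helpers that ship with PyTorch 2.0:

```python
# Sketch: report which SDP backends are currently enabled (PyTorch >= 2.0).
import torch

def report_sdp_backends():
    print("flash:        ", torch.backends.cuda.flash_sdp_enabled())
    print("mem_efficient:", torch.backends.cuda.mem_efficient_sdp_enabled())
    print("math:         ", torch.backends.cuda.math_sdp_enabled())

# After the flag handling above, all three should report True.
report_sdp_backends()
```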
@@ -175,6 +183,9 @@ def xformers_enabled():
         return False
     return XFORMERS_IS_AVAILBLE

+def pytorch_attention_enabled():
+    return ENABLE_PYTORCH_ATTENTION
+
 def get_free_memory(dev=None, torch_free_too=False):
     if dev is None:
         dev = get_torch_device()
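The new pytorch_attention_enabled() helper lets model-building code choose an attention implementation for the VAE at construction time. Below is a hedged sketch of such a selector; the helper names (make_vae_attention, slow_attention) and the import path are illustrative assumptions, not identifiers from the other changed files in this commit.

```python
# Sketch (hypothetical helper): pick the attention kernel for the VAE based on
# the runtime flags exposed by model_management.
import torch
import torch.nn.functional as F
import model_management  # import path assumed

def slow_attention(q, k, v):
    # fallback path: explicit softmax(QK^T / sqrt(d)) V
    scale = q.shape[-1] ** -0.5
    return torch.softmax(q @ k.transpose(-2, -1) * scale, dim=-1) @ v

def make_vae_attention():
    if model_management.pytorch_attention_enabled():
        # fused SDP kernels enabled via --use-pytorch-cross-attention
        return F.scaled_dot_product_attention
    return slow_attention
```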