From f9e55d8463da692954d84f51ca354161396fe1b8 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 15 Jan 2024 03:10:22 -0500
Subject: [PATCH] Only auto enable bf16 VAE on nvidia GPUs that actually
 support it.

---
 comfy/model_management.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index fefd3c8c9..e12146d11 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -175,7 +175,7 @@ try:
         if int(torch_version[0]) >= 2:
             if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
                 ENABLE_PYTORCH_ATTENTION = True
-        if torch.cuda.is_bf16_supported():
+        if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8:
             VAE_DTYPE = torch.bfloat16
     if is_intel_xpu():
         if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: