From daf9d25ee282051055e773185aa161cd2a1ce3a6 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Sat, 7 Jun 2025 07:01:15 -0700
Subject: [PATCH] Cleaner torch version comparisons. (#8453)

---
 comfy/model_management.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 8ae5a5ab..a107f0d4 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -305,7 +305,7 @@ try:
         logging.info("AMD arch: {}".format(arch))
         logging.info("ROCm version: {}".format(rocm_version))
         if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
-            if torch_version_numeric[0] >= 2 and torch_version_numeric[1] >= 7: # works on 2.6 but doesn't actually seem to improve much
+            if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
                 if any((a in arch) for a in ["gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches
                     ENABLE_PYTORCH_ATTENTION = True
 except:
@@ -328,7 +328,7 @@ except:
     pass

 try:
-    if torch_version_numeric[0] == 2 and torch_version_numeric[1] >= 5:
+    if torch_version_numeric >= (2, 5):
         torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True)
 except:
     logging.warning("Warning, could not set allow_fp16_bf16_reduction_math_sdp")
@@ -1276,11 +1276,11 @@ def supports_fp8_compute(device=None):
         if props.minor < 9:
             return False

-    if torch_version_numeric[0] < 2 or (torch_version_numeric[0] == 2 and torch_version_numeric[1] < 3):
+    if torch_version_numeric < (2, 3):
         return False

     if WINDOWS:
-        if (torch_version_numeric[0] == 2 and torch_version_numeric[1] < 4):
+        if torch_version_numeric < (2, 4):
             return False

     return True
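
Note (not part of the patch): the cleanup works because Python compares tuples lexicographically, element by element from the left, so torch_version_numeric >= (2, 7) is both shorter and more correct than the element-wise form it replaces. The sketch below is illustrative only; it assumes torch_version_numeric is a (major, minor) tuple of ints, as it is in comfy/model_management.py, and the old_check/new_check names are hypothetical.

def old_check(v):
    # Element-wise form removed by this patch: it wrongly rejects (3, 0),
    # because 0 >= 7 is False even though torch 3.0 would be newer than 2.7.
    return v[0] >= 2 and v[1] >= 7

def new_check(v):
    # Tuple form added by this patch: (3, 0) >= (2, 7) is True because
    # 3 > 2 settles the comparison before the minor version is consulted.
    return v >= (2, 7)

for v in [(2, 6), (2, 7), (3, 0)]:
    print(v, old_check(v), new_check(v))
# (2, 6) False False
# (2, 7) True True
# (3, 0) False True   <- old element-wise form breaks on a future major version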