Disable omnigen2 fp16 on older pytorch versions. (#8672)

This commit is contained in:
comfyanonymous
2025-06-26 00:39:09 -07:00
committed by GitHub
parent 93a49a45de
commit a96e65df18
2 changed files with 13 additions and 1 deletion

@@ -1290,6 +1290,13 @@ def supports_fp8_compute(device=None):
     return True
 
+def extended_fp16_support():
+    # TODO: check why some models work with fp16 on newer torch versions but not on older
+    if torch_version_numeric < (2, 7):
+        return False
+
+    return True
+
 def soft_empty_cache(force=False):
     global cpu_state
     if cpu_state == CPUState.MPS:
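
The second changed file is not shown in this hunk. As a rough, self-contained sketch of how a model definition might gate fp16 behind this check, the snippet below mirrors the version test from the diff; the torch_version_numeric parsing, the omnigen2_supported_dtypes helper, and the dtype list are illustrative assumptions, not the repository's actual code.

import torch

# Illustrative stand-in for the repository's torch_version_numeric:
# the leading (major, minor) of torch.__version__, e.g. "2.7.1+cu126" -> (2, 7).
torch_version_numeric = tuple(int(x) for x in torch.__version__.split(".")[:2])


def extended_fp16_support():
    # Same gate as the diff above: fp16 is only reported on torch >= 2.7.
    return torch_version_numeric >= (2, 7)


def omnigen2_supported_dtypes():
    # Hypothetical caller: always offer bf16/fp32, and only advertise fp16
    # when the running torch version passes the check.
    dtypes = [torch.bfloat16, torch.float32]
    if extended_fp16_support():
        dtypes.insert(0, torch.float16)
    return dtypes


print(omnigen2_supported_dtypes())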