Mirror of https://github.com/comfyanonymous/ComfyUI.git
Not sure if AMD actually supports fp16 acc but it doesn't crash. (#9258)
parent 735bb4bdb1
commit 5828607ccf
@@ -340,7 +340,7 @@ if ENABLE_PYTORCH_ATTENTION:
 
 PRIORITIZE_FP16 = False # TODO: remove and replace with something that shows exactly which dtype is faster than the other
 try:
-    if is_nvidia() and PerformanceFeature.Fp16Accumulation in args.fast:
+    if (is_nvidia() or is_amd()) and PerformanceFeature.Fp16Accumulation in args.fast:
         torch.backends.cuda.matmul.allow_fp16_accumulation = True
         PRIORITIZE_FP16 = True # TODO: limit to cards where it actually boosts performance
         logging.info("Enabled fp16 accumulation.")
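For context: torch.backends.cuda.matmul.allow_fp16_accumulation asks the matmul backend (cuBLAS on NVIDIA, hipBLAS/rocBLAS on ROCm builds) to accumulate fp16 matmul products in fp16 instead of fp32, trading numerical precision for speed. A minimal standalone sketch of the flag, assuming a recent PyTorch build that exposes it (the try/except in the patched code guards against builds that don't); the matrix sizes here are illustrative only:

import torch

# Request fp16 accumulation for fp16 matmuls. The attribute only exists
# in newer PyTorch builds, hence the hasattr guard.
if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"):
    torch.backends.cuda.matmul.allow_fp16_accumulation = True

# torch.cuda.is_available() is also True on ROCm builds for AMD GPUs.
if torch.cuda.is_available():
    a = torch.randn(1024, 1024, device="cuda", dtype=torch.float16)
    b = torch.randn(1024, 1024, device="cuda", dtype=torch.float16)
    c = a @ b  # accumulated in fp16 when the backend honors the flag
    print(c.dtype)  # torch.float16

Whether the flag actually speeds anything up depends on the GPU and backend, which is what the commit's TODO comments about limiting it to specific cards are pointing at.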
|