Move the code that empties the GPU cache to model_management.py

comfyanonymous committed on 2023-04-15 11:19:07 -04:00
parent f4c689ea89
commit deb2b93e79
2 changed files with 11 additions and 7 deletions


@@ -307,6 +307,15 @@ def should_use_fp16():
     return True
 
+def soft_empty_cache():
+    global xpu_available
+    if xpu_available:
+        torch.xpu.empty_cache()
+    elif torch.cuda.is_available():
+        if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
+            torch.cuda.empty_cache()
+            torch.cuda.ipc_collect()
+
 #TODO: might be cleaner to put this somewhere else
 import threading
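
For readers following along, here is a minimal, self-contained sketch of the function this commit moves into model_management.py. It mirrors the diff above, except that it replaces the module-global xpu_available flag with a hasattr() probe, an assumption made only so the snippet runs on its own.

import torch

def soft_empty_cache():
    # Ask the caching allocator to release unused blocks back to the device.
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        # Intel GPU path (requires an XPU-enabled build such as
        # intel_extension_for_pytorch). This probe stands in for the
        # global xpu_available flag used in the actual module.
        torch.xpu.empty_cache()
    elif torch.cuda.is_available():
        # torch.version.cuda is a version string on CUDA builds and None on
        # ROCm builds (which set torch.version.hip instead), so this gate
        # skips the extra cleanup on AMD, matching the comment in the diff.
        if torch.version.cuda:
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()

A typical caller would invoke this after unloading a model so that cached VRAM blocks are handed back to the driver and become visible to other processes, rather than staying reserved by PyTorch's allocator.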