Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2025-09-12 12:37:01 +00:00
Don't unload/reload model from CPU uselessly.
comfy/model_management.py (new file, 26 lines added)
@@ -0,0 +1,26 @@
+
+
+current_loaded_model = None
+
+
+def unload_model():
+    global current_loaded_model
+    if current_loaded_model is not None:
+        current_loaded_model.model.cpu()
+        current_loaded_model.unpatch_model()
+        current_loaded_model = None
+
+
+def load_model_gpu(model):
+    global current_loaded_model
+    if model is current_loaded_model:
+        return
+    unload_model()
+    try:
+        real_model = model.patch_model()
+    except Exception as e:
+        model.unpatch_model()
+        raise e
+    current_loaded_model = model
+    real_model.cuda()
+    return current_loaded_model
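For context, the caching behaviour this new module introduces can be exercised in isolation. The sketch below is a minimal, hypothetical harness, not code from the repository: DummyModel and its patch_model/unpatch_model/.model members are stand-ins for the interface load_model_gpu() expects, the sys.path line assumes the harness is run from the repository root so that model_management resolves the same way sd.py imports it, and a CUDA device is assumed because load_model_gpu() calls .cuda(). It shows that a second load_model_gpu() call with the same object returns early instead of doing a useless CPU round-trip.

# Hypothetical harness for comfy/model_management.py (a sketch, not repo code).
import sys
sys.path.append("comfy")      # assumption: run from the repository root
import torch
import model_management

class DummyModel:
    """Stand-in exposing only what load_model_gpu()/unload_model() touch."""
    def __init__(self):
        self.model = torch.nn.Linear(4, 4)   # placeholder for the real diffusion model

    def patch_model(self):
        # The real object applies weight patches here; this stand-in does nothing.
        return self.model

    def unpatch_model(self):
        pass

m = DummyModel()
model_management.load_model_gpu(m)   # patches the model and moves it to the GPU
model_management.load_model_gpu(m)   # same object: returns immediately, no unload/reload
model_management.unload_model()      # moves the weights back to the CPU and unpatches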
@@ -2,6 +2,7 @@ import torch
 
 import sd1_clip
 import sd2_clip
+import model_management
 from ldm.util import instantiate_from_config
 from ldm.models.autoencoder import AutoencoderKL
 from omegaconf import OmegaConf
@@ -304,6 +305,7 @@ class VAE:
         self.device = device
 
     def decode(self, samples):
+        model_management.unload_model()
         self.first_stage_model = self.first_stage_model.to(self.device)
         samples = samples.to(self.device)
         pixel_samples = self.first_stage_model.decode(1. / self.scale_factor * samples)
@@ -313,6 +315,7 @@ class VAE:
         return pixel_samples
 
     def encode(self, pixel_samples):
+        model_management.unload_model()
         self.first_stage_model = self.first_stage_model.to(self.device)
         pixel_samples = pixel_samples.movedim(-1,1).to(self.device)
         samples = self.first_stage_model.encode(2. * pixel_samples - 1.).sample() * self.scale_factor
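The two VAE hunks call model_management.unload_model() at the top of decode() and encode(), so whatever diffusion model is currently cached is moved back to the CPU and unpatched before the VAE weights are placed on the GPU. The sketch below illustrates that hand-off under the same assumptions as the earlier harness (FakeUNet and fake_vae are illustrative placeholders, sys.path and a CUDA device are assumed); it is not code from the repository.

# Hypothetical sketch of the VRAM hand-off the VAE hunks implement.
import sys
sys.path.append("comfy")      # assumption: run from the repository root
import torch
import model_management

class FakeUNet:
    def __init__(self):
        self.model = torch.nn.Linear(8, 8)   # placeholder for the diffusion model
    def patch_model(self):
        return self.model
    def unpatch_model(self):
        pass

fake_vae = torch.nn.Linear(8, 8)             # placeholder for first_stage_model

unet = FakeUNet()
model_management.load_model_gpu(unet)        # sampling: diffusion model on the GPU

# What VAE.decode()/encode() now do first:
model_management.unload_model()              # diffusion model back to CPU, unpatched
fake_vae = fake_vae.cuda()                   # then the VAE weights move to the GPU
print(next(unet.model.parameters()).device)  # cpu
print(next(fake_vae.parameters()).device)    # cuda:0

Repeated sampling calls with the same model object now skip the unload/reload entirely, while a decode() or encode() still clears the cache so the diffusion model and the VAE do not occupy VRAM at the same time.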