Add --cpu to use the cpu for inference.
comfy/sd.py
@@ -299,7 +299,7 @@ class CLIP:
         return cond
 
 class VAE:
-    def __init__(self, ckpt_path=None, scale_factor=0.18215, device="cuda", config=None):
+    def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
         if config is None:
             #default SD1.x/SD2.x VAE parameters
             ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0}
@@ -308,6 +308,8 @@ class VAE:
             self.first_stage_model = AutoencoderKL(**(config['params']), ckpt_path=ckpt_path)
         self.first_stage_model = self.first_stage_model.eval()
         self.scale_factor = scale_factor
+        if device is None:
+            device = model_management.get_torch_device()
         self.device = device
 
     def decode(self, samples):
@@ -381,11 +383,13 @@ def resize_image_to(tensor, target_latent_tensor, batched_number):
     return torch.cat([tensor] * batched_number, dim=0)
 
 class ControlNet:
-    def __init__(self, control_model, device="cuda"):
+    def __init__(self, control_model, device=None):
         self.control_model = control_model
         self.cond_hint_original = None
         self.cond_hint = None
         self.strength = 1.0
+        if device is None:
+            device = model_management.get_torch_device()
         self.device = device
         self.previous_controlnet = None
 
@@ -406,7 +410,7 @@ class ControlNet:
         else:
             precision_scope = contextlib.nullcontext
 
-        with precision_scope(self.device):
+        with precision_scope(model_management.get_autocast_device(self.device)):
             self.control_model = model_management.load_if_low_vram(self.control_model)
             control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=cond_txt)
             self.control_model = model_management.unload_if_low_vram(self.control_model)
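The last hunk above also swaps `precision_scope(self.device)` for `precision_scope(model_management.get_autocast_device(self.device))`. The likely reason is that `torch.autocast` expects a device *type* string such as "cuda" or "cpu" rather than a full `torch.device` object, so the helper presumably normalizes whatever `get_torch_device()` returned. A minimal sketch of such a helper, assuming the real implementation (in comfy/model_management.py, which this diff does not touch) does something equivalent:

    # Hypothetical sketch; the actual helper lives in comfy/model_management.py,
    # which is not shown in this commit's diff.
    def get_autocast_device(dev):
        # torch.autocast wants a device-type string ("cuda", "cpu", ...),
        # not a torch.device object, so pull out .type when it exists.
        if hasattr(dev, "type"):
            return dev.type
        return "cuda"  # assumed fallback for plain-string/legacy inputs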
@@ -481,7 +485,7 @@ def load_controlnet(ckpt_path, model=None):
             context_dim = controlnet_data[key].shape[1]
 
     use_fp16 = False
-    if controlnet_data[key].dtype == torch.float16:
+    if model_management.should_use_fp16() and controlnet_data[key].dtype == torch.float16:
         use_fp16 = True
 
     control_model = cldm.ControlNet(image_size=32,
@@ -527,10 +531,12 @@ def load_controlnet(ckpt_path, model=None):
     return control
 
 class T2IAdapter:
-    def __init__(self, t2i_model, channels_in, device="cuda"):
+    def __init__(self, t2i_model, channels_in, device=None):
         self.t2i_model = t2i_model
         self.channels_in = channels_in
         self.strength = 1.0
+        if device is None:
+            device = model_management.get_torch_device()
         self.device = device
         self.previous_controlnet = None
         self.control_input = None
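Taken together, the pattern across all three classes is uniform: every `device="cuda"` default becomes `device=None`, and the concrete device is resolved at construction time via `model_management.get_torch_device()`, so a single `--cpu` flag can steer `VAE`, `ControlNet`, and `T2IAdapter` onto the CPU. The diff does not include comfy/model_management.py, so the following is only a sketch of how such a resolver (and the related `should_use_fp16()` gate) could honor the flag; the `cpu_mode` variable and its wiring to the CLI parser are assumptions:

    # Hypothetical sketch of the helpers this diff calls into; the real
    # implementations live in comfy/model_management.py, not shown here.
    import torch

    cpu_mode = False  # assumed to be set to True when --cpu is parsed

    def get_torch_device():
        # Prefer the GPU, but fall back to the CPU when --cpu was given
        # or CUDA is simply unavailable.
        if cpu_mode or not torch.cuda.is_available():
            return torch.device("cpu")
        return torch.device("cuda")

    def should_use_fp16():
        # fp16 weights only pay off on the GPU; keep fp32 on the CPU, which
        # is why load_controlnet now gates its fp16 check on this helper.
        return not cpu_mode and torch.cuda.is_available()

With something like this in place, parsing `--cpu` before any model object is constructed is enough: the constructors above no longer hard-code "cuda", they just ask `get_torch_device()`.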