mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2025-09-14 05:25:23 +00:00
Add a CLIPLoader node to load standalone CLIP text-encoder weights.
Put them in models/clip
This commit is contained in:
16
comfy/sd.py
16
comfy/sd.py
@@ -274,9 +274,15 @@ class CLIP:
|
||||
n.tokenizer = self.tokenizer
|
||||
return n
|
||||
|
||||
def load_from_state_dict(self, sd):
    """Load a raw text-encoder state dict into the wrapped transformer.

    Loading is non-strict so checkpoints with missing/extra keys
    (e.g. standalone CLIP dumps) are still accepted.
    """
    transformer = self.cond_stage_model.transformer
    transformer.load_state_dict(sd, strict=False)
|
||||
|
||||
def add_patches(self, patches, strength=1.0):
    """Register weight patches (e.g. LoRA deltas) on the underlying patcher.

    Delegates directly to ``self.patcher`` and returns whatever it returns.
    """
    result = self.patcher.add_patches(patches, strength)
    return result
|
||||
|
||||
def clip_layer(self, layer_idx):
    """Forward the hidden-layer selection to the wrapped cond-stage model."""
    model = self.cond_stage_model
    return model.clip_layer(layer_idx)
|
||||
|
||||
def encode(self, text):
|
||||
tokens = self.tokenizer.tokenize_with_weights(text)
|
||||
try:
|
||||
@@ -317,6 +323,16 @@ class VAE:
|
||||
samples = samples.cpu()
|
||||
return samples
|
||||
|
||||
def load_clip(ckpt_path, embedding_directory=None):
    """Build a standalone CLIP object from a text-encoder checkpoint.

    ckpt_path: path to a file containing only CLIP text-encoder weights.
    embedding_directory: optional directory of textual-inversion embeddings,
        passed through to the CLIP constructor.
    Returns the CLIP instance with the checkpoint weights loaded.
    """
    clip_data = load_torch_file(ckpt_path)
    # A layer-22 MLP weight can only exist in the deeper OpenCLIP (SD2.x)
    # text model; its presence distinguishes the two supported encoders.
    is_open_clip = "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data
    target = ('ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
              if is_open_clip
              else 'ldm.modules.encoders.modules.FrozenCLIPEmbedder')
    config = {'target': target}
    clip = CLIP(config=config, embedding_directory=embedding_directory)
    clip.load_from_state_dict(clip_data)
    return clip
|
||||
|
||||
def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
|
||||
config = OmegaConf.load(config_path)
|
||||
|
@@ -8,15 +8,26 @@ class SD2ClipModel(sd1_clip.SD1ClipModel):
|
||||
super().__init__(device=device, freeze=freeze, textmodel_json_config=textmodel_json_config)
|
||||
self.empty_tokens = [[49406] + [49407] + [0] * 75]
|
||||
if layer == "last":
|
||||
layer_idx = -1
|
||||
pass
|
||||
elif layer == "penultimate":
|
||||
layer_idx = -2
|
||||
layer_idx = -1
|
||||
self.clip_layer(layer_idx)
|
||||
elif self.layer == "hidden":
|
||||
assert layer_idx is not None
|
||||
assert abs(layer_idx) < 24
|
||||
self.clip_layer(layer_idx)
|
||||
else:
|
||||
raise NotImplementedError()
|
||||
self.clip_layer(layer_idx)
|
||||
|
||||
def clip_layer(self, layer_idx):
    """Select which hidden layer of the SD2.x text encoder to read from.

    Stores the choice on ``self.layer`` / ``self.layer_idx``; out-of-range
    requests are clamped to the penultimate layer (-2).
    """
    # Shift negative indices back by one: the real last usable layer of the
    # SD2.x CLIP model is the penultimate one — the final layer may contain
    # garbage.
    idx = layer_idx - 1 if layer_idx < 0 else layer_idx
    self.layer = "hidden"
    # The SD2.x text model has 24 layers; clamp anything beyond that.
    self.layer_idx = -2 if abs(idx) >= 24 else idx
|
||||
|
||||
class SD2Tokenizer(sd1_clip.SD1Tokenizer):
|
||||
def __init__(self, tokenizer_path=None):
|
||||
|
Reference in New Issue
Block a user