Try to free enough VRAM for Control LoRA inference.

comfyanonymous
2023-08-24 17:20:54 -04:00
parent e3d0a9a490
commit 51dde87e97
4 changed files with 30 additions and 18 deletions

@@ -32,6 +32,13 @@ def save_torch_file(sd, ckpt, metadata=None):
    else:
        safetensors.torch.save_file(sd, ckpt)

def calculate_parameters(sd, prefix=""):
    params = 0
    for k in sd.keys():
        if k.startswith(prefix):
            params += sd[k].nelement()
    return params

def transformers_convert(sd, prefix_from, prefix_to, number):
    keys_to_replace = {
        "{}positional_embedding": "{}embeddings.position_embedding.weight",