Always shift text encoder to GPU when the device supports fp16.

comfyanonymous
2023-08-23 21:45:00 -04:00
parent a6ef08a46a
commit cc44ade79e
2 changed files with 4 additions and 5 deletions


@@ -545,7 +545,7 @@ class CLIP:
         load_device = model_management.text_encoder_device()
         offload_device = model_management.text_encoder_offload_device()
         params['device'] = load_device
-        if model_management.should_use_fp16(load_device):
+        if model_management.should_use_fp16(load_device, prioritize_performance=False):
             params['dtype'] = torch.float16
         else:
             params['dtype'] = torch.float32
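
For context (not part of the commit), a minimal Python sketch of the dtype decision this diff changes. The helper name pick_text_encoder_dtype is hypothetical, and it assumes model_management is importable as comfy.model_management from the repository root:

import torch
from comfy import model_management

def pick_text_encoder_dtype(load_device=None):
    # Hypothetical helper mirroring the selection in CLIP above.
    # prioritize_performance=False asks for fp16 whenever the device can
    # run it, even if fp32 might be faster on that hardware, trading some
    # possible speed for lower text encoder memory use.
    if load_device is None:
        load_device = model_management.text_encoder_device()
    if model_management.should_use_fp16(load_device, prioritize_performance=False):
        return torch.float16
    return torch.float32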