From 50db297cf6c40e04947225ee7a5224da90b82fda Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 29 Jan 2023 00:50:46 -0500 Subject: [PATCH] Try to fix OOM issues with cards that have less VRAM than mine. --- comfy/ldm/modules/attention.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 88c442d17..f6d16b95c 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -194,7 +194,14 @@ class CrossAttentionBirchSan(nn.Module): kv_chunk_size_min = None - query_chunk_size_x = 1024 * 4 + #not sure at all about the math here + #TODO: tweak this + if mem_free_total > 8192 * 1024 * 1024 * 1.3: + query_chunk_size_x = 1024 * 4 + elif mem_free_total > 4096 * 1024 * 1024 * 1.3: + query_chunk_size_x = 1024 * 2 + else: + query_chunk_size_x = 1024 kv_chunk_size_min_x = None kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024 if kv_chunk_size_x < 1024: