From 9d54066ebc005415e46e85850f77360dd33b6623 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 14 Jun 2023 13:05:08 -0400
Subject: [PATCH] This isn't needed for inference.

---
 comfy/ldm/modules/attention.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 4670ca578..5fb4fa2af 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -610,13 +610,12 @@ class SpatialTransformer(nn.Module):
                 for d in range(depth)]
         )
         if not use_linear:
-            self.proj_out = zero_module(nn.Conv2d(inner_dim,
-                                                  in_channels,
+            self.proj_out = nn.Conv2d(inner_dim,in_channels,
                                                   kernel_size=1,
                                                   stride=1,
-                                                  padding=0))
+                                                  padding=0)
         else:
-            self.proj_out = zero_module(comfy.ops.Linear(in_channels, inner_dim))
+            self.proj_out = comfy.ops.Linear(in_channels, inner_dim)
         self.use_linear = use_linear
 
     def forward(self, x, context=None, transformer_options={}):
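
Context for the change (editorial note, not part of the patch): zero_module, as defined in the upstream latent-diffusion code that this attention.py derives from, zeroes a layer's parameters in place so a freshly built proj_out starts as a no-op. That only matters when initializing the model for training; at inference the parameters are immediately overwritten by the checkpoint's state dict, so the wrapper is redundant. A minimal sketch of the idea follows; the 320-channel layer is an arbitrary choice for illustration only.

    import torch
    import torch.nn as nn

    def zero_module(module):
        # Zero every parameter of the module in place and return it.
        # At training init this makes the layer start as a no-op.
        for p in module.parameters():
            p.detach().zero_()
        return module

    # Whether or not zero_module was applied, loading a checkpoint
    # replaces the initial values, so inference sees the same weights.
    proj_out = zero_module(nn.Conv2d(320, 320, kernel_size=1, stride=1, padding=0))
    state = {"weight": torch.randn(320, 320, 1, 1), "bias": torch.randn(320)}
    proj_out.load_state_dict(state)  # checkpoint weights overwrite the zeros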