Skip to content

Commit 87f9130

Browse files
Revert "This doesn't seem to be needed on chroma. (Comfy-Org#8209)" (Comfy-Org#8210)
This reverts commit 7e84bf5.
1 parent 7e84bf5 commit 87f9130

File tree

1 file changed

+5
-0
lines changed

1 file changed

+5
-0
lines changed

comfy/ldm/chroma/layers.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -109,6 +109,9 @@ def forward(self, img: Tensor, txt: Tensor, pe: Tensor, vec: Tensor, attn_mask=None):
         txt += txt_mod1.gate * self.txt_attn.proj(txt_attn)
         txt += txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)

+        if txt.dtype == torch.float16:
+            txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504)
+
         return img, txt


@@ -160,6 +163,8 @@ def forward(self, x: Tensor, pe: Tensor, vec: Tensor, attn_mask=None) -> Tensor:
         # compute activation in mlp stream, cat again and run second linear layer
         output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
         x += mod.gate * output
+        if x.dtype == torch.float16:
+            x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504)
         return x


0 commit comments

Comments
 (0)