1 parent d76426d commit 3b558e0
src/diffusers/models/transformers/transformer_sd3.py
@@ -278,13 +278,13 @@ def forward(
         else:
             lora_scale = 1.0
 
-        if USE_PEFT_BACKEND:
-            # weight the lora layers by setting `lora_scale` for each PEFT layer
-            scale_lora_layers(self, lora_scale)
-        else:
-            logger.warning(
-                "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
-            )
+        # if USE_PEFT_BACKEND:
+        #     # weight the lora layers by setting `lora_scale` for each PEFT layer
+        #     scale_lora_layers(self, lora_scale)
+        # else:
+        #     logger.warning(
+        #         "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
+        #     )
 
         height, width = hidden_states.shape[-2:]
 
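For context, the branch this commit comments out is part of the usual diffusers pattern of temporarily scaling PEFT/LoRA layers around a forward pass. The sketch below is a minimal illustration of that pattern, not the repository's code: `forward_with_lora_scale` and its `model(...)` call signature are hypothetical stand-ins, while `USE_PEFT_BACKEND`, `scale_lora_layers`, and `unscale_lora_layers` are helpers exported from `diffusers.utils`.

```python
# Minimal sketch of the scale/unscale-around-forward pattern the commented-out
# block belongs to. `forward_with_lora_scale` and the model call signature are
# hypothetical; the helpers imported from diffusers.utils are real.
from diffusers.utils import USE_PEFT_BACKEND, scale_lora_layers, unscale_lora_layers


def forward_with_lora_scale(model, hidden_states, joint_attention_kwargs=None):
    # Read the LoRA scale from the attention kwargs, defaulting to 1.0 when absent.
    joint_attention_kwargs = dict(joint_attention_kwargs or {})
    lora_scale = joint_attention_kwargs.pop("scale", 1.0)

    if USE_PEFT_BACKEND:
        # Temporarily weight every PEFT/LoRA layer's contribution by `lora_scale`.
        scale_lora_layers(model, lora_scale)
    try:
        # Hypothetical call; the real SD3 transformer forward takes more arguments.
        output = model(hidden_states, joint_attention_kwargs=joint_attention_kwargs)
    finally:
        if USE_PEFT_BACKEND:
            # Undo the scaling so later calls see the unmodified LoRA weights.
            unscale_lora_layers(model, lora_scale)
    return output
```

In `SD3Transformer2DModel.forward` itself, the same idea is applied in place: `lora_scale` is read from `joint_attention_kwargs`, the LoRA layers are scaled when the PEFT backend is active, and the warning in the removed branch covered the case where `scale` was passed without that backend.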