Commit de16f64

feat: when using PT 2.0 use LoRAAttnProcessor2_0 for text enc LoRA. (#3691)
1 parent 017ee16 commit de16f64

File tree

1 file changed: 4 additions, 1 deletion

src/diffusers/loaders.py

Lines changed: 4 additions & 1 deletion
@@ -1168,7 +1168,10 @@ def _load_text_encoder_attn_procs(
             cross_attention_dim = value_dict["to_k_lora.down.weight"].shape[1]
             hidden_size = value_dict["to_k_lora.up.weight"].shape[0]
 
-            attn_processors[key] = LoRAAttnProcessor(
+            attn_processor_class = (
+                LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
+            )
+            attn_processors[key] = attn_processor_class(
                 hidden_size=hidden_size,
                 cross_attention_dim=cross_attention_dim,
                 rank=rank,
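
For context, the change makes the text-encoder LoRA loader pick the attention processor class based on whether the installed PyTorch exposes F.scaled_dot_product_attention, which was introduced in PyTorch 2.0. Below is a minimal standalone sketch of that selection pattern; the import path matches diffusers around the time of this commit, and the hidden_size, cross_attention_dim, and rank values are illustrative assumptions, whereas the real loader derives them from the LoRA state dict.

import torch.nn.functional as F

from diffusers.models.attention_processor import (
    LoRAAttnProcessor,
    LoRAAttnProcessor2_0,
)

# PyTorch 2.0 added F.scaled_dot_product_attention; its presence is the switch
# between the 2.0 processor (fused attention kernel) and the classic one.
attn_processor_class = (
    LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
)

# Hypothetical shapes for illustration only; the loader in this commit reads
# them from the "to_k_lora" weights of the incoming state dict.
processor = attn_processor_class(
    hidden_size=768,
    cross_attention_dim=768,
    rank=4,
)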
