1 parent 017ee16 commit de16f64
src/diffusers/loaders.py
@@ -1168,7 +1168,10 @@ def _load_text_encoder_attn_procs(
 cross_attention_dim = value_dict["to_k_lora.down.weight"].shape[1]
 hidden_size = value_dict["to_k_lora.up.weight"].shape[0]

-attn_processors[key] = LoRAAttnProcessor(
+attn_processor_class = (
+    LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
+)
+attn_processors[key] = attn_processor_class(
     hidden_size=hidden_size,
     cross_attention_dim=cross_attention_dim,
     rank=rank,
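For context, the added lines dispatch on whether the installed PyTorch exposes `torch.nn.functional.scaled_dot_product_attention` (introduced in PyTorch 2.0), selecting the `LoRAAttnProcessor2_0` variant only when the fused-attention API is available. Below is a minimal sketch of the same feature-detection pattern; the two processor classes are hypothetical stand-ins, not the diffusers implementations:

import torch.nn.functional as F


class FallbackProcessor:
    """Stand-in for the plain attention path (PyTorch < 2.0)."""


class SDPAProcessor:
    """Stand-in for the fused path built on F.scaled_dot_product_attention."""


# Same dispatch expression as in the commit above: probe the installed
# torch for the PyTorch 2.0 API and fall back gracefully if it is absent.
processor_class = (
    SDPAProcessor if hasattr(F, "scaled_dot_product_attention") else FallbackProcessor
)
print(f"Selected: {processor_class.__name__}")

Keying the choice off `hasattr` rather than a version-string comparison means the check stays correct for nightly builds and forks where the attribute may exist independently of the reported version.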