@@ -2269,7 +2269,7 @@ def set_gguf_parameters(self):
2269
2269
self .gguf_writer .add_rope_scaling_orig_ctx_len (self .hparams ["rope_scaling" ]["original_max_position_embeddings" ])
2270
2270
2271
2271
2272
- @Model .register ("Qwen2VLForConditionalGeneration" )
2272
+ @Model .register ("Qwen2VLForConditionalGeneration" , "Qwen2_5_VLForConditionalGeneration" )
2273
2273
class Qwen2VLModel (Model ):
2274
2274
model_arch = gguf .MODEL_ARCH .QWEN2VL
2275
2275
@@ -4419,6 +4419,29 @@ def prepare_tensors(self):
4419
4419
raise ValueError (f"Unprocessed experts: { experts } " )
4420
4420
4421
4421
4422
@Model.register("PLMForCausalLM")
class PLMModel(Model):
    """Conversion handler for PLM checkpoints (``PLMForCausalLM``).

    Maps the HF hyperparameters onto the GGUF metadata keys used by the
    PLM architecture.  Attention appears to use a DeepSeek-style layout
    (separate no-RoPE / RoPE head dims plus a KV LoRA rank) — TODO confirm
    against the model's config schema.
    """

    model_arch = gguf.MODEL_ARCH.PLM

    def set_vocab(self):
        # PLM ships a GPT-2-style BPE tokenizer.
        self._set_vocab_gpt2()

    def set_gguf_parameters(self):
        super().set_gguf_parameters()
        hparams = self.hparams
        self.gguf_writer.add_vocab_size(hparams["vocab_size"])
        self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"])
        # The effective key width is the concatenation of the non-RoPE and
        # RoPE portions of each head.
        self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"])
        self.gguf_writer.add_value_length(hparams["v_head_dim"])
        self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])

    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
        # Straight one-to-one rename; no tensor surgery required for PLM.
        return [(self.map_tensor_name(name), data_torch)]

    # NOTE(review): the original also overrode prepare_tensors() with a body
    # that only called super().prepare_tensors() — a no-op override, removed.

4422
4445
@Model .register ("T5WithLMHeadModel" )
4423
4446
@Model .register ("T5ForConditionalGeneration" )
4424
4447
@Model .register ("MT5ForConditionalGeneration" )
0 commit comments