From b1a60aa4aedc768d9a9bb026eedd0fc188a28770 Mon Sep 17 00:00:00 2001
From: Colin Kealty <3266127+bartowski1182@users.noreply.github.com>
Date: Mon, 31 Mar 2025 13:21:07 -0400
Subject: [PATCH 1/6] Update llama-quant.cpp llama_tensor_get_type with DeepSeek friendly modifications

---
 src/llama-quant.cpp | 68 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 65 insertions(+), 3 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 09eb570779ce5..211ee55b0ba33 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -192,9 +192,39 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             ++qs.i_attention_wv;
         }
-        else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
+        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_k") != std::string::npos) {
             new_type = GGML_TYPE_Q4_K;
         }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q") != std::string::npos) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_down") != std::string::npos) {
+            if (qs.i_ffn_down < qs.n_ffn_down/16) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            else if (qs.i_ffn_down < qs.n_ffn_down/8) {
+                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            }
+            ++qs.i_ffn_down;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_gate") != std::string::npos) {
+            if (qs.i_ffn_gate < qs.n_ffn_gate/16) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            else if (qs.i_ffn_gate < qs.n_ffn_gate/8 || qs.i_ffn_gate >= 7*qs.n_ffn_gate/8) {
+                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            }
+            ++qs.i_ffn_gate;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_up") != std::string::npos) {
+            if (qs.i_ffn_up < qs.n_ffn_up/16) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            else if (qs.i_ffn_up < qs.n_ffn_up/8) {
+                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            }
+            ++qs.i_ffn_up;
+        }
         else if (name.find("ffn_down") != std::string::npos) {
             if (qs.i_ffn_down < qs.n_ffn_down/8) {
                 new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
@@ -202,7 +232,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             ++qs.i_ffn_down;
         }
         else if (name.find("attn_output.weight") != std::string::npos) {
-            if (qs.model.hparams.n_expert == 8) {
+            if (qs.model.hparams.n_expert >= 8) {
                 new_type = GGML_TYPE_Q5_K;
             } else {
                 if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
@@ -313,7 +343,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
         ++qs.i_ffn_down;
     } else if (name.find("attn_output.weight") != std::string::npos) {
         if (arch != LLM_ARCH_FALCON) {
-            if (qs.model.hparams.n_expert == 8) {
+            if (qs.model.hparams.n_expert >= 8) {
                 if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL ||
                     ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S ||
@@ -353,6 +383,38 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             new_type = GGML_TYPE_IQ3_XXS;
         }
         ++qs.i_ffn_up;
+    } else if (name.find("attn_kv_a_mqa") != std::string::npos) {
+        if (qs.model.hparams.n_expert >= 8) {
+            new_type = GGML_TYPE_Q8_0;
+        }
+    } else if (name.find("attn_kv_b.weight") != std::string::npos) {
+        if (qs.model.hparams.n_expert >= 8) {
+            new_type = GGML_TYPE_Q4_K;
+            if (qs.i_attention_wv < qs.n_attention_wv/16) {
+                new_type = GGML_TYPE_Q8_0;
+            } else if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
+                new_type = GGML_TYPE_Q6_K;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) new_type = GGML_TYPE_Q5_K;
+            else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
+        }
+        ++qs.i_attention_wv;
+    } else if (name.find("attn_q_b.weight") != std::string::npos) {
+        if (qs.model.hparams.n_expert >= 8) {
+            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
+            else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
+        }
+    } else if (name.find("attn_q_a.weight") != std::string::npos) {
+        if (qs.model.hparams.n_expert >= 8) {
+            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
+            else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
+        }
     }
 
     //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;

From e51a5e58b6cb8af18bfb54bfbbbe750732f130c1 Mon Sep 17 00:00:00 2001
From: Colin Kealty <3266127+bartowski1182@users.noreply.github.com>
Date: Tue, 1 Apr 2025 19:01:40 -0400
Subject: [PATCH 2/6] Claw back a few of the changes for less dramatic file size increase

---
 src/llama-quant.cpp | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 211ee55b0ba33..931499da8696d 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -192,12 +192,27 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             ++qs.i_attention_wv;
         }
-        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_k") != std::string::npos) {
+        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_k.weight") != std::string::npos) {
             new_type = GGML_TYPE_Q4_K;
         }
-        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q") != std::string::npos) {
+        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_kv_a_mqa.weight") != std::string::npos) {
             new_type = GGML_TYPE_Q4_K;
         }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_kv_b.weight") != std::string::npos) {
+            if (qs.i_attention_wv < qs.n_attention_wv/16) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            else if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
+                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            }
+            ++qs.i_attention_wv;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q_a.weight") != std::string::npos) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q_b.weight") != std::string::npos) {
+            new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+        }
         else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_down") != std::string::npos) {
             if (qs.i_ffn_down < qs.n_ffn_down/16) {
                 new_type = GGML_TYPE_Q4_K;
             }

From c07f5d71fdf4cc8c6c3f97797844f9dc37ceaa54 Mon Sep 17 00:00:00 2001
From: Colin Kealty <3266127+bartowski1182@users.noreply.github.com>
Date: Wed, 2 Apr 2025 22:32:59 -0400
Subject: [PATCH 3/6] Few more changes and tweaks

---
 ggml/src/ggml-common.h |   2 +-
 src/llama-quant.cpp    | 157 +++++++++++++++++++++++++++++------------
 2 files changed, 114 insertions(+), 45 deletions(-)

diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h
index 086c822d73a89..4544a8046de2e 100644
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -368,8 +368,8 @@ typedef struct {
 } block_iq3_xxs;
 static_assert(sizeof(block_iq3_xxs) == sizeof(ggml_half) + 3*(QK_K/8), "wrong iq3_xxs block size/padding");
 
-// 3.4375 bpw
 #define IQ3S_N_SCALE QK_K/64
+// 3.4375 bpw
 typedef struct {
     ggml_half d;
     uint8_t qs[QK_K/4];
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 931499da8696d..107a9aacd1980 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -28,10 +28,22 @@ struct quantize_state_impl {
     int n_ffn_down = 0;
     int n_ffn_gate = 0;
     int n_ffn_up = 0;
+    int n_ffn_down_exp = 0;
+    int n_ffn_gate_exp = 0;
+    int n_ffn_up_exp = 0;
+    int n_ffn_down_shexp = 0;
+    int n_ffn_gate_shexp = 0;
+    int n_ffn_up_shexp = 0;
     int i_attention_wv = 0;
     int i_ffn_down = 0;
     int i_ffn_gate = 0;
     int i_ffn_up = 0;
+    int i_ffn_down_exp = 0;
+    int i_ffn_gate_exp = 0;
+    int i_ffn_up_exp = 0;
+    int i_ffn_down_shexp = 0;
+    int i_ffn_gate_shexp = 0;
+    int i_ffn_up_shexp = 0;
 
     int n_k_quantized = 0;
     int n_fallback = 0;
@@ -119,6 +131,23 @@ static void llama_tensor_dequantize_impl(
     workers.clear();
 }
 
+// Check if ftype is specifically IQ2_S or IQ2_M
+static inline bool is_iq2s_or_iq2m(llama_ftype ftype) {
+    return ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M;
+}
+
+// Check if ftype belongs to the IQ1 group
+static inline bool is_iq1_group(llama_ftype ftype) {
+    return ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M;
+}
+
+// Returns the appropriate type for expert _exps tensors based on ftype
+static inline ggml_type get_expert_exps_type(llama_ftype ftype) {
+    if (is_iq1_group(ftype)) return GGML_TYPE_IQ2_XXS;
+    if (is_iq2s_or_iq2m(ftype)) return GGML_TYPE_IQ3_XXS;
+    /* otherwise */ return GGML_TYPE_IQ2_XS;
+}
+
 static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
     const std::string name = ggml_get_name(tensor);
 
@@ -175,7 +204,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
                 ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
                 new_type = GGML_TYPE_Q2_K;
             }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
+            else if (is_iq2s_or_iq2m(ftype)) {
                 new_type = GGML_TYPE_IQ3_S;
             }
            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
@@ -189,7 +218,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
        ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
         if (name.find("attn_v.weight") != std::string::npos) {
             if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
-            else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            else new_type = is_iq2s_or_iq2m(ftype) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             ++qs.i_attention_wv;
         }
         else if (qs.model.hparams.n_expert >= 8 && name.find("attn_k.weight") != std::string::npos) {
@@ -199,11 +228,11 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             new_type = GGML_TYPE_Q4_K;
         }
         else if (qs.model.hparams.n_expert >= 8 && name.find("attn_kv_b.weight") != std::string::npos) {
-            if (qs.i_attention_wv < qs.n_attention_wv/16) {
+            if (qs.i_attention_wv < qs.n_attention_wv/8) {
                 new_type = GGML_TYPE_Q4_K;
             }
             else if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
-                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+                new_type = is_iq2s_or_iq2m(ftype) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             }
             ++qs.i_attention_wv;
         }
@@ -211,47 +240,83 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             new_type = GGML_TYPE_Q4_K;
         }
         else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q_b.weight") != std::string::npos) {
-            new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            new_type = is_iq2s_or_iq2m(ftype) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
         }
-        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_down") != std::string::npos) {
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_down.weight") != std::string::npos) {
             if (qs.i_ffn_down < qs.n_ffn_down/16) {
                 new_type = GGML_TYPE_Q4_K;
             }
             else if (qs.i_ffn_down < qs.n_ffn_down/8) {
-                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+                new_type = is_iq2s_or_iq2m(ftype) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             }
             ++qs.i_ffn_down;
         }
-        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_gate") != std::string::npos) {
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_gate.weight") != std::string::npos) {
             if (qs.i_ffn_gate < qs.n_ffn_gate/16) {
                 new_type = GGML_TYPE_Q4_K;
             }
-            else if (qs.i_ffn_gate < qs.n_ffn_gate/8 || qs.i_ffn_gate >= 7*qs.n_ffn_gate/8) {
-                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            else if (qs.i_ffn_gate < qs.n_ffn_gate/8) {
+                new_type = is_iq2s_or_iq2m(ftype) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             }
             ++qs.i_ffn_gate;
         }
-        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_up") != std::string::npos) {
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_up.weight") != std::string::npos) {
             if (qs.i_ffn_up < qs.n_ffn_up/16) {
                 new_type = GGML_TYPE_Q4_K;
             }
             else if (qs.i_ffn_up < qs.n_ffn_up/8) {
-                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+                new_type = is_iq2s_or_iq2m(ftype) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             }
             ++qs.i_ffn_up;
         }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_down_exps.weight") != std::string::npos) {
+            if (qs.i_ffn_down_exp < qs.n_ffn_down_exp/8) {
+                new_type = get_expert_exps_type(ftype);
+            }
+            ++qs.i_ffn_down_exp;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_gate_exps.weight") != std::string::npos) {
+            if (qs.i_ffn_gate_exp < qs.n_ffn_gate_exp/8) {
+                new_type = get_expert_exps_type(ftype);
+            }
+            ++qs.i_ffn_gate_exp;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_up_exps.weight") != std::string::npos) {
+            if (qs.i_ffn_up_exp < qs.n_ffn_up_exp/8) {
+                new_type = get_expert_exps_type(ftype);
+            }
+            ++qs.i_ffn_up_exp;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_down_shexp.weight") != std::string::npos) {
+            if (use_more_bits(qs.i_ffn_down_shexp, qs.n_ffn_down_shexp)) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            ++qs.i_ffn_down_shexp;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_gate_shexp.weight") != std::string::npos) {
+            if (use_more_bits(qs.i_ffn_gate_shexp, qs.n_ffn_gate_shexp)) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            ++qs.i_ffn_gate_shexp;
+        }
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_up_shexp.weight") != std::string::npos) {
+            if (use_more_bits(qs.i_ffn_up_shexp, qs.n_ffn_up_shexp)) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+            ++qs.i_ffn_up_shexp;
+        }
         else if (name.find("ffn_down") != std::string::npos) {
             if (qs.i_ffn_down < qs.n_ffn_down/8) {
-                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+                new_type = is_iq2s_or_iq2m(ftype) ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
             }
             ++qs.i_ffn_down;
         }
         else if (name.find("attn_output.weight") != std::string::npos) {
             if (qs.model.hparams.n_expert >= 8) {
-                new_type = GGML_TYPE_Q5_K;
+                new_type = is_iq2s_or_iq2m(ftype) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
             } else {
-                if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
+                if (is_iq1_group(ftype)) new_type = GGML_TYPE_IQ2_XXS;
+                else if (is_iq2s_or_iq2m(ftype)) new_type = GGML_TYPE_IQ3_S;
             }
         }
     } else if (name.find("attn_v.weight") != std::string::npos) {
@@ -398,38 +463,28 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             new_type = GGML_TYPE_IQ3_XXS;
         }
         ++qs.i_ffn_up;
-    } else if (name.find("attn_kv_a_mqa") != std::string::npos) {
-        if (qs.model.hparams.n_expert >= 8) {
+    } else if (qs.model.hparams.n_expert >= 8 && name.find("attn_kv_a_mqa.weight") != std::string::npos) {
+        new_type = GGML_TYPE_Q8_0;
+    } else if (qs.model.hparams.n_expert >= 8 && name.find("attn_kv_b.weight") != std::string::npos) {
+        new_type = GGML_TYPE_Q4_K;
+        if (qs.i_attention_wv < qs.n_attention_wv/16) {
             new_type = GGML_TYPE_Q8_0;
+        } else if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
+            new_type = GGML_TYPE_Q6_K;
         }
-    } else if (name.find("attn_kv_b.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert >= 8) {
-            new_type = GGML_TYPE_Q4_K;
-            if (qs.i_attention_wv < qs.n_attention_wv/16) {
-                new_type = GGML_TYPE_Q8_0;
-            } else if (use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) {
-                new_type = GGML_TYPE_Q6_K;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) new_type = GGML_TYPE_Q5_K;
-            else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
-        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) new_type = GGML_TYPE_Q5_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
         ++qs.i_attention_wv;
-    } else if (name.find("attn_q_b.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert >= 8) {
-            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
-                new_type = GGML_TYPE_Q4_K;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
-            else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
-        }
-    } else if (name.find("attn_q_a.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert >= 8) {
-            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
-                new_type = GGML_TYPE_Q4_K;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
-            else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
+    } else if (qs.model.hparams.n_expert >= 8 &&name.find("attn_q_b.weight") != std::string::npos) {
+        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+            new_type = GGML_TYPE_Q4_K;
         }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q5_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
+    } else if (qs.model.hparams.n_expert >= 8 && name.find("attn_q_a.weight") != std::string::npos) {
+        new_type = GGML_TYPE_Q5_K;
+        if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q6_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q8_0;
     }
 
     //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
@@ -695,9 +750,23 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
+        } else if (name.find("ffn_gate_exps.weight") != std::string::npos) {
+            ++qs.n_ffn_gate_exp;
+        } else if (name.find("ffn_gate_shexp.weight") != std::string::npos) {
+            ++qs.n_ffn_gate_shexp;
+        } else if (name.find("ffn_down_exps.weight") != std::string::npos) {
+            ++qs.n_ffn_down_exp;
+        } else if (name.find("ffn_down_shexp.weight") != std::string::npos) {
+            ++qs.n_ffn_down_shexp;
+        } else if (name.find("ffn_up_exps.weight") != std::string::npos) {
+            ++qs.n_ffn_up_exp;
+        } else if (name.find("ffn_up_shexp.weight") != std::string::npos) {
+            ++qs.n_ffn_up_shexp;
         }
     }
 
+    GGML_ASSERT(qs.n_ffn_down_exp != 0);
+
     qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
 
     // sanity checks for models that have attention layers

From db2d562c1a135a84da5e72a95206de1c27a6cb71 Mon Sep 17 00:00:00 2001
From: Colin Kealty <3266127+bartowski1182@users.noreply.github.com>
Date: Wed, 2 Apr 2025 22:52:13 -0400
Subject: [PATCH 4/6] Remove debug assert

---
 src/llama-quant.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 107a9aacd1980..3821e563774ce 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -765,8 +765,6 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         }
     }
 
-    GGML_ASSERT(qs.n_ffn_down_exp != 0);
-
     qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
 
     // sanity checks for models that have attention layers

From feae28b88dabb834fb47f12738dab7d2b0a80720 Mon Sep 17 00:00:00 2001
From: Colin Kealty <3266127+bartowski1182@users.noreply.github.com>
Date: Wed, 2 Apr 2025 23:02:06 -0400
Subject: [PATCH 5/6] Remove trailing whitespaces

---
 src/llama-quant.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 3821e563774ce..36ea6f47f0421 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -251,7 +251,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             }
             ++qs.i_ffn_down;
         }
-        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_gate.weight") != std::string::npos) { 
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_gate.weight") != std::string::npos) {
             if (qs.i_ffn_gate < qs.n_ffn_gate/16) {
                 new_type = GGML_TYPE_Q4_K;
             }
@@ -260,7 +260,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
             }
             ++qs.i_ffn_gate;
         }
-        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_up.weight") != std::string::npos) { 
+        else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_up.weight") != std::string::npos) {
             if (qs.i_ffn_up < qs.n_ffn_up/16) {
                 new_type = GGML_TYPE_Q4_K;
             }

From 71ab7420dd5fd6a8902f7e89d3028c747a593615 Mon Sep 17 00:00:00 2001
From: Colin Kealty <3266127+bartowski1182@users.noreply.github.com>
Date: Thu, 3 Apr 2025 19:45:46 -0400
Subject: [PATCH 6/6] A bit more weight to shared experts for larger sizes

---
 src/llama-quant.cpp | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 36ea6f47f0421..57fa6376366e7 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -376,6 +376,30 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
             new_type = GGML_TYPE_IQ2_S;
         }
+    } else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_down_shexp.weight") != std::string::npos) {
+        new_type = GGML_TYPE_Q5_K;
+        if (use_more_bits(qs.i_ffn_down_shexp, qs.n_ffn_down_shexp)) {
+            new_type = GGML_TYPE_Q8_0;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q6_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q8_0;
+        ++qs.i_ffn_down_shexp;
+    } else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_gate_shexp.weight") != std::string::npos) {
+        new_type = GGML_TYPE_Q5_K;
+        if (use_more_bits(qs.i_ffn_gate_shexp, qs.n_ffn_gate_shexp)) {
+            new_type = GGML_TYPE_Q8_0;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q6_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q8_0;
+        ++qs.i_ffn_gate_shexp;
+    } else if (qs.model.hparams.n_expert >= 8 && name.find("ffn_up_shexp.weight") != std::string::npos) {
+        new_type = GGML_TYPE_Q5_K;
+        if (use_more_bits(qs.i_ffn_up_shexp, qs.n_ffn_up_shexp)) {
+            new_type = GGML_TYPE_Q8_0;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q6_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q8_0;
+        ++qs.i_ffn_up_shexp;
     } else if (name.find("ffn_down") != std::string::npos) {
         auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
         int i_layer = info.first, n_layer = info.second;