Commit 55c0ba9

Merge branch 'ggml-org:master' into test
2 parents: 41d136d + 75afa0a

34 files changed, +928 -478 lines

README.md (+4, -1)

@@ -97,6 +97,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [x] [Flan T5](https://huggingface.co/models?search=flan-t5)
 - [x] [Open Elm models](https://huggingface.co/collections/apple/openelm-instruct-models-6619ad295d7ae9f868b759ca)
 - [x] [ChatGLM3-6b](https://huggingface.co/THUDM/chatglm3-6b) + [ChatGLM4-9b](https://huggingface.co/THUDM/glm-4-9b) + [GLMEdge-1.5b](https://huggingface.co/THUDM/glm-edge-1.5b-chat) + [GLMEdge-4b](https://huggingface.co/THUDM/glm-edge-4b-chat)
+- [x] [GLM-4-0414](https://huggingface.co/collections/THUDM/glm-4-0414-67f3cbcb34dd9d252707cb2e)
 - [x] [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966)
 - [x] [EXAONE-3.0-7.8B-Instruct](https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct)
 - [x] [FalconMamba Models](https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a)

@@ -259,7 +260,9 @@ The [Hugging Face](https://huggingface.co) platform hosts a [number of LLMs](htt
 - [Trending](https://huggingface.co/models?library=gguf&sort=trending)
 - [LLaMA](https://huggingface.co/models?sort=trending&search=llama+gguf)
 
-You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from Hugging Face by using this CLI argument: `-hf <user>/<model>[:quant]`
+You can either manually download the GGUF file or directly use any `llama.cpp`-compatible models from [Hugging Face](https://huggingface.co/) or other model hosting sites, such as [ModelScope](https://modelscope.cn/), by using this CLI argument: `-hf <user>/<model>[:quant]`.
+
+By default, the CLI would download from Hugging Face, you can switch to other options with the environment variable `MODEL_ENDPOINT`. For example, you may opt to downloading model checkpoints from ModelScope or other model sharing communities by setting the environment variable, e.g. `MODEL_ENDPOINT=https://www.modelscope.cn/`.
 
 After downloading a model, use the CLI tools to run it locally - see below.
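As a usage sketch combining the two paragraphs added above (with `llama-cli` as the example binary), pulling the same `-hf` reference from ModelScope instead of Hugging Face would look like `MODEL_ENDPOINT=https://www.modelscope.cn/ llama-cli -hf <user>/<model>[:quant]`.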

common/arg.cpp (+8, -9)

@@ -228,12 +228,13 @@ static bool common_download_file_single(const std::string & url, const std::stri
     curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
     curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);
 
+    http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
     // Check if hf-token or bearer-token was specified
     if (!bearer_token.empty()) {
         std::string auth_header = "Authorization: Bearer " + bearer_token;
         http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
-        curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);
     }
+    curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);
 
 #if defined(_WIN32)
     // CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
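The first hunk is why the `CURLOPT_HTTPHEADER` call moves out of the bearer-token branch: a `User-Agent: llama-cpp` header is now appended unconditionally, so the header list must be attached to the handle whether or not a token was supplied. A standalone sketch of that pattern with plain libcurl (illustrative code, not the project's `common_download_file_single`):

```cpp
// Build a header list that always carries a User-Agent and only optionally an
// Authorization header, then attach it to the handle outside the token check.
#include <curl/curl.h>
#include <string>

static CURLcode fetch(const std::string & url, const std::string & bearer_token) {
    CURL * curl = curl_easy_init();
    if (!curl) {
        return CURLE_FAILED_INIT;
    }
    struct curl_slist * headers = curl_slist_append(nullptr, "User-Agent: llama-cpp"); // always present
    if (!bearer_token.empty()) {
        const std::string auth = "Authorization: Bearer " + bearer_token;
        headers = curl_slist_append(headers, auth.c_str());                            // optional
    }
    curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);                               // outside the if

    CURLcode res = curl_easy_perform(curl);

    curl_slist_free_all(headers);
    curl_easy_cleanup(curl);
    return res;
}

int main() {
    return fetch("https://huggingface.co/", "") == CURLE_OK ? 0 : 1;
}
```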
@@ -544,7 +545,10 @@ static struct common_hf_file_res common_get_hf_file(const std::string & hf_repo_
     curl_ptr curl(curl_easy_init(), &curl_easy_cleanup);
     curl_slist_ptr http_headers;
     std::string res_str;
-    std::string url = "https://huggingface.co/v2/" + hf_repo + "/manifests/" + tag;
+
+    std::string model_endpoint = get_model_endpoint();
+
+    std::string url = model_endpoint + "v2/" + hf_repo + "/manifests/" + tag;
     curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
     curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L);
     typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data);

@@ -659,13 +663,8 @@ static void common_params_handle_model(
         }
     }
 
-    std::string hf_endpoint = "https://huggingface.co/";
-    const char * hf_endpoint_env = getenv("HF_ENDPOINT");
-    if (hf_endpoint_env) {
-        hf_endpoint = hf_endpoint_env;
-        if (hf_endpoint.back() != '/') hf_endpoint += '/';
-    }
-    model.url = hf_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file;
+    std::string model_endpoint = get_model_endpoint();
+    model.url = model_endpoint + model.hf_repo + "/resolve/main/" + model.hf_file;
     // make sure model path is present (for caching purposes)
     if (model.path.empty()) {
         // this is to avoid different repo having same file name, or same file name in different subdirs

common/chat.cpp (+1, -1)

@@ -1622,7 +1622,7 @@ static common_chat_params common_chat_templates_apply_jinja(
     }
 
     // Hermes 2/3 Pro, Qwen 2.5 Instruct (w/ tools)
-    if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null()) {
+    if (src.find("<tool_call>") != std::string::npos && params.json_schema.is_null() && params.tools.is_array() && params.json_schema.is_null()) {
         return common_chat_params_init_hermes_2_pro(tmpl, params);
     }
 
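In effect, the Hermes 2/3 Pro / Qwen 2.5 tool-call handler is now only selected when the request actually supplies a `tools` array, on top of the existing requirement that no JSON schema is set.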

common/common.cpp (+17, -2)

@@ -830,7 +830,7 @@ std::string fs_get_cache_directory() {
     if (getenv("LLAMA_CACHE")) {
         cache_directory = std::getenv("LLAMA_CACHE");
     } else {
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
         if (std::getenv("XDG_CACHE_HOME")) {
             cache_directory = std::getenv("XDG_CACHE_HOME");
         } else {

@@ -840,7 +840,9 @@ std::string fs_get_cache_directory() {
             cache_directory = std::getenv("HOME") + std::string("/Library/Caches/");
 #elif defined(_WIN32)
             cache_directory = std::getenv("LOCALAPPDATA");
-#endif // __linux__
+#else
+#  error Unknown architecture
+#endif
         cache_directory = ensure_trailing_slash(cache_directory);
         cache_directory += "llama.cpp";
     }
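With the `#else` / `#error` fallback above, building on a platform outside the listed set now fails at compile time instead of silently producing an empty cache path. A standalone sketch of the same guard pattern (illustrative only, not the project's `fs_get_cache_directory`; the fallback paths here are assumptions):

```cpp
#include <cstdlib>
#include <iostream>
#include <string>

// Pick a per-platform cache base; unknown platforms become a compile-time error.
static std::string cache_base() {
#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
    if (const char * xdg = std::getenv("XDG_CACHE_HOME")) {
        return xdg;
    }
    const char * home = std::getenv("HOME");
    return std::string(home ? home : ".") + "/.cache";
#elif defined(__APPLE__)
    const char * home = std::getenv("HOME");
    return std::string(home ? home : ".") + "/Library/Caches";
#elif defined(_WIN32)
    const char * local = std::getenv("LOCALAPPDATA");
    return local ? local : ".";
#else
#   error Unknown architecture
#endif
}

int main() {
    std::cout << cache_base() << "/llama.cpp\n"; // e.g. ~/.cache/llama.cpp on Linux
}
```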
@@ -1027,6 +1029,19 @@ struct common_init_result common_init_from_params(common_params & params) {
     return iparams;
 }
 
+std::string get_model_endpoint() {
+    const char * model_endpoint_env = getenv("MODEL_ENDPOINT");
+    // We still respect the use of environment-variable "HF_ENDPOINT" for backward-compatibility.
+    const char * hf_endpoint_env = getenv("HF_ENDPOINT");
+    const char * endpoint_env = model_endpoint_env ? model_endpoint_env : hf_endpoint_env;
+    std::string model_endpoint = "https://huggingface.co/";
+    if (endpoint_env) {
+        model_endpoint = endpoint_env;
+        if (model_endpoint.back() != '/') model_endpoint += '/';
+    }
+    return model_endpoint;
+}
+
 void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora) {
     llama_clear_adapter_lora(ctx);
     for (auto & la : lora) {
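Together with the `common/arg.cpp` hunks, this gives a single place where the endpoint is resolved: `MODEL_ENDPOINT` wins, `HF_ENDPOINT` remains as a backward-compatible fallback, `https://huggingface.co/` is the default, and a trailing slash is always guaranteed. A standalone sketch of that resolution and of the two URL shapes it feeds (illustrative code, not the project's implementation; the repo, file and tag values are placeholders):

```cpp
#include <cstdlib>
#include <iostream>
#include <string>

// Same precedence as get_model_endpoint() above: MODEL_ENDPOINT, then HF_ENDPOINT,
// then the Hugging Face default, always normalized to end with '/'.
static std::string resolve_model_endpoint() {
    const char * model_env = std::getenv("MODEL_ENDPOINT");
    const char * hf_env    = std::getenv("HF_ENDPOINT");
    const char * env       = model_env ? model_env : hf_env;
    std::string endpoint = "https://huggingface.co/";
    if (env && *env) {                       // extra guard against an empty value
        endpoint = env;
        if (endpoint.back() != '/') endpoint += '/';
    }
    return endpoint;
}

int main() {
    // e.g. MODEL_ENDPOINT=https://www.modelscope.cn/ redirects both URLs to ModelScope
    const std::string endpoint = resolve_model_endpoint();
    const std::string repo = "user/model"; // placeholder <user>/<model>
    std::cout << endpoint + repo + "/resolve/main/model.gguf" << "\n";  // file download, as in common_params_handle_model
    std::cout << endpoint + "v2/" + repo + "/manifests/latest" << "\n"; // manifest lookup, as in common_get_hf_file
}
```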

common/common.h (+2)

@@ -543,6 +543,8 @@ struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_p
 // clear LoRA adapters from context, then apply new list of adapters
 void common_set_adapter_lora(struct llama_context * ctx, std::vector<common_adapter_lora_info> & lora);
 
+std::string get_model_endpoint();
+
 //
 // Batch utils
 //

convert_hf_to_gguf.py (+19, -1)

@@ -735,6 +735,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "d353350c764d8c3b39c763113960e4fb4919bea5fbf208a0e3b22e8469dc7406":
             # ref: https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct
             res = "llama4"
+        if chkhsh == "a1336059768a55c99a734006ffb02203cd450fed003e9a71886c88acf24fdbc2":
+            # ref: https://huggingface.co/THUDM/glm-4-9b-hf
+            res = "glm4"
 
         if res is None:
             logger.warning("\n")

@@ -4897,6 +4900,22 @@ def prepare_tensors(self):
         self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)
 
 
+@Model.register("Glm4ForCausalLM")
+class Glm4Model(Model):
+    model_arch = gguf.MODEL_ARCH.GLM4
+
+    def set_vocab(self):
+        self._set_vocab_gpt2()
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "yarn":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
+
+
 @Model.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
 class ChatGLMModel(Model):
     model_arch = gguf.MODEL_ARCH.CHATGLM

@@ -5588,7 +5607,6 @@ def main() -> None:
     with torch.inference_mode():
         output_type = ftype_map[args.outtype]
         model_architecture = hparams["architectures"][0]
-
         try:
             model_class = Model.from_model_architecture(model_architecture)
         except NotImplementedError:

convert_hf_to_gguf_update.py (+1)

@@ -114,6 +114,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "trillion", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/trillionlabs/Trillion-7B-preview", },
     {"name": "bailingmoe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/inclusionAI/Ling-lite", },
     {"name": "llama4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E-Instruct", },
+    {"name": "glm4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/THUDM/glm-4-9b-hf", },
 ]
examples/llava/clip-impl.h (+16, -7)

@@ -1,5 +1,6 @@
 #include "ggml.h"
 #include "gguf.h"
+#include "clip.h"
 
 #include "clip.h"
 

@@ -202,23 +203,31 @@ static void clip_log_internal(enum ggml_log_level level, const char * format, ..
 // cpp wrappers
 //
 
+// wrapper for clip_image_size
+struct clip_image_size_deleter {
+    void operator()(clip_image_size * val) { clip_image_size_free(val); }
+};
+typedef std::unique_ptr<clip_image_size, clip_image_size_deleter> clip_image_size_ptr;
+
+// wrapper for clip_image_u8
 struct clip_image_u8_deleter {
     void operator()(clip_image_u8 * val) { clip_image_u8_free(val); }
 };
+typedef std::unique_ptr<clip_image_u8, clip_image_u8_deleter> clip_image_u8_ptr;
 
+// wrapper for clip_image_f32
 struct clip_image_f32_deleter {
     void operator()(clip_image_f32 * val) { clip_image_f32_free(val); }
 };
+typedef std::unique_ptr<clip_image_f32, clip_image_f32_deleter> clip_image_f32_ptr;
 
-struct clip_image_f32_batch_deleter {
-    void operator()(clip_image_f32_batch * val) { clip_image_f32_batch_free(val); }
+struct clip_image_u8_batch {
+    std::vector<clip_image_u8_ptr> entries;
 };
 
-typedef std::unique_ptr<clip_image_u8, clip_image_u8_deleter> clip_image_u8_ptr;
-typedef std::unique_ptr<clip_image_f32, clip_image_f32_deleter> clip_image_f32_ptr;
-typedef std::unique_ptr<clip_image_f32_batch, clip_image_f32_batch_deleter> clip_image_f32_batch_ptr;
-
-// TODO @ngxson : we're currently having a naming clash between struct clip_image_size and function clip_image_size()
+struct clip_image_f32_batch {
+    std::vector<clip_image_f32_ptr> entries;
+};
 
 //
 // common utils

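For context on the clip-impl.h change, the pattern being introduced is a C-style allocate/free pair owned through `std::unique_ptr` with a custom deleter, plus batch structs whose `entries` vectors own their images. A self-contained sketch of that ownership pattern (the type names mirror `clip-impl.h`, but the struct and allocator below are stand-ins, not the project's `clip.h` API):

```cpp
#include <cstdio>
#include <memory>
#include <vector>

struct image_u8 { int nx = 0, ny = 0; };                    // stand-in for clip_image_u8
static image_u8 * image_u8_init() { return new image_u8; }  // stand-in for clip_image_u8_init
static void image_u8_free(image_u8 * p) { delete p; }       // stand-in for clip_image_u8_free

struct image_u8_deleter {
    void operator()(image_u8 * val) { image_u8_free(val); }
};
typedef std::unique_ptr<image_u8, image_u8_deleter> image_u8_ptr;

struct image_u8_batch {
    std::vector<image_u8_ptr> entries;                       // the batch owns its images
};

int main() {
    image_u8_batch batch;
    image_u8_ptr img(image_u8_init());
    img->nx = 64;
    img->ny = 64;
    batch.entries.push_back(std::move(img));                 // transfer ownership into the batch
    std::printf("batch size: %zu\n", batch.entries.size());
}   // every entry is freed via image_u8_deleter when batch goes out of scope
```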