1 parent c255573 commit 055e01b
include/llama.h
@@ -1328,7 +1328,7 @@ extern "C" {
 typedef bool (*llama_opt_param_filter)(const struct ggml_tensor * tensor, void * userdata);

 // always returns true
-bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata);
+LLAMA_API bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata);

 struct llama_opt_params {
     uint32_t n_ctx_train; // assumed context size post training, use context size specified in llama_context if 0
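The change adds the LLAMA_API export macro to llama_opt_param_filter_all so the symbol is exported from the shared library, matching the llama_opt_param_filter callback type declared just above it. For context, here is a minimal sketch of a user-defined filter with the same signature; the tensor-name check is purely illustrative and not part of this commit.

#include <string.h>
#include "llama.h"

// Sketch of a custom llama_opt_param_filter callback (illustrative only):
// return true for tensors that should be optimized, false to leave them frozen.
static bool my_param_filter(const struct ggml_tensor * tensor, void * userdata) {
    (void) userdata;                              // unused in this sketch
    return strstr(tensor->name, "attn") != NULL;  // hypothetical: train only attention tensors
}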