@@ -116,6 +116,7 @@ pub fn build(b: *std.build.Builder) !void {
     const ggml_backend = make.obj("ggml-backend", "ggml-backend.c");
     const ggml_quants = make.obj("ggml-quants", "ggml-quants.c");
     const unicode = make.obj("unicode", "unicode.cpp");
+    const unicode_data = make.obj("unicode-data", "unicode-data.cpp");
     const llama = make.obj("llama", "llama.cpp");
     const buildinfo = make.obj("common", "common/build-info.cpp");
     const common = make.obj("common", "common/common.cpp");
@@ -127,14 +128,14 @@ pub fn build(b: *std.build.Builder) !void {
     const clip = make.obj("clip", "examples/llava/clip.cpp");
     const llava = make.obj("llava", "examples/llava/llava.cpp");

-    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, sampling, console, grammar_parser });
-    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
-    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
-    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo });
-    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, train });
-    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, train });
+    _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, sampling, console, grammar_parser });
+    _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
+    _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
+    _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo });
+    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, train });
+    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, train });

-    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, common, buildinfo, sampling, grammar_parser, json_schema_to_grammar, clip, llava });
+    const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, ggml_backend, ggml_quants, llama, unicode, unicode_data, common, buildinfo, sampling, grammar_parser, json_schema_to_grammar, clip, llava });

     if (server.target.isWindows()) {
         server.linkSystemLibrary("ws2_32");
     }