@@ -33,26 +33,33 @@ jobs:
        run: make quantize

      - name: file info
-        run: file build/quantize*
+        run: file build/quantize-arm64-darwin
+
+      - name: test quantize
+        run: |
+          build/quantize-arm64-darwin \
+            llama.cpp/models/ggml-vocab-llama.gguf \
+            /tmp/ggml-vocab-Q4_K_M.gguf \
+            Q4_K_M

      - uses: actions/upload-artifact@v4
        with:
-          name: "quantize-macos-arm64"
-          path: build/quantize*
+          name: "quantize-arm64-darwin"
+          path: build/quantize-arm64-darwin

  linux-build:
    name: "Build quantize on Linux for ${{ matrix.arch }}"
    runs-on: "ubuntu-latest"
    strategy:
      fail-fast: true
      matrix:
-        arch:
-          - "amd64"
-          - "arm64"
-          # - "ppc64le"
-          # - "s390x"
-        image:
-          - registry.access.redhat.com/ubi9/python-312
+        include:
+          - arch: "amd64"
+            suffix: "x86_64-linux"
+            image: quay.io/sclorg/python-312-c8s:c8s
+          - arch: "arm64"
+            suffix: "aarch64-linux"
+            image: quay.io/sclorg/python-312-c8s:c8s
    steps:
      - uses: "actions/checkout@v4"
        with:
@@ -70,21 +77,34 @@ jobs:

      - name: make build/quantize from llama.cpp sources
        run: |
-          set -e
-          docker run --platform linux/${{ matrix.arch }} ${{ matrix.image }} uname -a
-          docker run --platform linux/${{ matrix.arch }} \
-            -v .:/opt/app-root/src \
-            -e CMAKE_ARGS="-DLLAMA_FATAL_WARNINGS=ON" \
-            ${{ matrix.image }} \
-            make quantize
+          set -e
+          docker run --platform linux/${{ matrix.arch }} ${{ matrix.image }} uname -a
+          docker run --platform linux/${{ matrix.arch }} \
+            -v .:/opt/app-root/src \
+            -e CMAKE_ARGS="-DLLAMA_FATAL_WARNINGS=ON" \
+            ${{ matrix.image }} \
+            make quantize

      - name: file info
-        run: file build/quantize*
+        run: file build/quantize-${{ matrix.suffix }}
+
+      - name: file symbols
+        run: nm -a build/quantize-${{ matrix.suffix }} | grep -o "GLIBC.*" | sort -u
+
+      - name: test quantize
+        run: |
+          docker run --platform linux/${{ matrix.arch }} \
+            -v .:/opt/app-root/src \
+            ${{ matrix.image }} \
+            build/quantize-${{ matrix.suffix }} \
+            llama.cpp/models/ggml-vocab-llama.gguf \
+            /tmp/ggml-vocab-Q4_K_M.gguf \
+            Q4_K_M

      - uses: actions/upload-artifact@v4
        with:
-          name: "quantize-linux-${{ matrix.arch }}"
-          path: build/quantize*
+          name: "quantize-${{ matrix.suffix }}"
+          path: build/quantize-${{ matrix.suffix }}

  merge-artifacts:
    name: Merge artifacts
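The new "test quantize" steps can also be mirrored locally after a build; below is a minimal sketch assuming the macOS arm64 target from the workflow above (on Linux, substitute the matching build/quantize-<suffix> binary, where <suffix> is a placeholder for the matrix value):

    # hypothetical local run mirroring the workflow's smoke test
    make quantize
    build/quantize-arm64-darwin \
      llama.cpp/models/ggml-vocab-llama.gguf \
      /tmp/ggml-vocab-Q4_K_M.gguf \
      Q4_K_M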