
Commit 438a839

vulkan: add specific MMV kernels for IQ2 and IQ3 quants + optimizations (#11595)
* vulkan: implement specialized MMV kernels for IQ2 quantizations
* vulkan: add MMV kernels for IQ3 quants
* vulkan: Increase MMV batch size and unroll IQ LUT setup
* vulkan: fix init_iq_shmem for WG sizes larger than tables
* vulkan: common batch size for all I-quants
1 parent 9c42b17 commit 438a839

9 files changed (+509 −42 lines changed)

ggml/src/ggml-vulkan/ggml-vulkan.cpp (+28 −27)

Large diffs are not rendered by default.

ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp (+2 −0)
@@ -1,5 +1,7 @@
 #version 450
 
+#extension GL_EXT_control_flow_attributes : enable
+
 #include "types.comp"
 #include "generic_binary_head.comp"
 #include "dequant_funcs.comp"
New file: mul_mat_vec shader for IQ2_S (filename not shown in this view)

@@ -0,0 +1,90 @@
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

#include "mul_mat_vec_base.comp"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];

void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
    const uint y_idx = i * QUANT_K + 16 * itid;
    const uint nibble_shift = 4 * (itid & 1);
    const uint ib32 = itid / 2; // 0..7

    uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        const float d = float(data_a[ibi].d);
        const uint scale = (data_a[ibi].scales[ib32] >> nibble_shift) & 0xF;
        const float db = d * (0.5 + scale) * 0.25;

        const uint qh = data_a[ibi].qh[ib32];
        const u8vec2 qs16 = unpack8(data_a_packed16[ibi].qs[itid]);
        const u8vec2 sign16 = unpack8(data_a_packed16[ibi].qs[QUANT_K / 16 + itid]);
        [[unroll]] for (uint l = 0; l < 2; ++l) {
            const uint8_t sign = sign16[l];
            const uint qs = qs16[l] | ((qh << (8 - nibble_shift - 2 * l)) & 0x300);
            const uvec2 grid = iq2s_grid[qs];
            const vec4 grid0 = vec4(unpack8(grid.x));
            const vec4 grid1 = vec4(unpack8(grid.y));

            [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
                vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]);
                vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]);

                FLOAT_TYPE sum =
                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x),
                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y),
                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z),
                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w),
                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x),
                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y),
                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z),
                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign & 128) != 0 ? -grid1.w : grid1.w),
                    FLOAT_TYPE(0.0)))))))));
                temp[j][n] = fma(db, sum, temp[j][n]);
            }
        }
        ibi += num_blocks_per_row;
    }
}

void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;

    // 16 threads are used to process each block
    const uint blocks_per_wg = gl_WorkGroupSize.x/16;
    const uint tid = gl_LocalInvocationID.x;
    const uint itid = tid % 16; // 0...15
    const uint ix = tid / 16;

    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
            temp[j][i] = FLOAT_TYPE(0);
        }
    }

    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += blocks_per_wg)
        calc_superblock(a_offset, b_offset, itid, i, num_blocks_per_row, first_row, num_rows);

    reduce_result(temp, d_offset, first_row, num_rows, tid);
}

void main() {
    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);

    init_iq_shmem(gl_WorkGroupSize);

    // do NUM_ROWS at a time, unless there aren't enough remaining rows
    if (first_row + NUM_ROWS <= p.stride_d) {
        compute_outputs(first_row, NUM_ROWS);
    } else {
        if (first_row >= p.stride_d) {
            return;
        }
        compute_outputs(first_row, p.stride_d - first_row);
    }
}
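
For reference, a minimal scalar sketch of the per-16-weight accumulation this IQ2_S kernel performs, written in C++. The table name `iq2s_grid_ref`, the function signature, and the parameter layout are illustrative stand-ins rather than the ggml definitions; only the arithmetic (db = d * (0.5 + scale) * 0.25, the 10-bit grid index built from qs and qh, and the per-element sign flips) mirrors the shader above.

```cpp
#include <cstdint>

// Assumed lookup table standing in for ggml's iq2s_grid: one entry per 10-bit
// index, eight packed unsigned byte magnitudes per entry.
extern const uint64_t iq2s_grid_ref[1024];

// Dot product of the 16 weights handled by one (itid) slice against 16 activations y.
float iq2s_slice_dot(float d, uint32_t scale4,        // 4-bit sub-block scale
                     const uint8_t qs[2],             // low 8 bits of the two grid indices
                     uint32_t qh,                     // high index bits for this 32-weight group
                     const uint8_t signs[2],          // one sign byte per 8 weights
                     uint32_t nibble_shift,           // 0 or 4, as in the shader
                     const float y[16]) {
    const float db = d * (0.5f + float(scale4)) * 0.25f;   // same scale factor as the shader
    float sum = 0.0f;
    for (uint32_t l = 0; l < 2; ++l) {
        const uint32_t idx  = qs[l] | ((qh << (8 - nibble_shift - 2*l)) & 0x300);
        const uint64_t grid = iq2s_grid_ref[idx];
        for (uint32_t k = 0; k < 8; ++k) {
            const float g = float((grid >> (8*k)) & 0xFF);     // unsigned grid magnitude
            const float v = ((signs[l] >> k) & 1) ? -g : g;    // per-element sign flip
            sum += y[8*l + k] * v;
        }
    }
    return db * sum;   // the shader folds this into temp[j][n] via fma(db, sum, ...)
}
```

The shader vectorizes exactly this loop: 16 threads share one super-block, and the fma chain covers the eight grid values governed by one sign byte.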
New file: mul_mat_vec shader for IQ2_XS (filename not shown in this view)

@@ -0,0 +1,87 @@
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

#include "mul_mat_vec_base.comp"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];

void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
    const uint y_idx = i * QUANT_K + 16 * itid;
    const uint nibble_shift = 4 * (itid & 1);
    const uint ib32 = itid / 2; // 0..7

    uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        const float d = float(data_a[ibi].d);
        const uint scale = (data_a[ibi].scales[ib32] >> nibble_shift) & 0xF;
        const float db = d * (0.5 + scale) * 0.25;

        [[unroll]] for (uint l = 0; l < 2; ++l) {
            const uint qs = data_a[ibi].qs[2 * itid + l];
            const uint sign = qs >> 9;
            const uint sign7 = bitCount(sign);
            const vec4 grid0 = vec4(unpack8(iq2xs_grid[qs & 511].x));
            const vec4 grid1 = vec4(unpack8(iq2xs_grid[qs & 511].y));

            [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
                vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]);
                vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]);

                FLOAT_TYPE sum =
                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x),
                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y),
                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z),
                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w),
                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x),
                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y),
                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z),
                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign7 & 1) != 0 ? -grid1.w : grid1.w),
                    FLOAT_TYPE(0.0)))))))));
                temp[j][n] = fma(db, sum, temp[j][n]);
            }
        }
        ibi += num_blocks_per_row;
    }
}

void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;

    // 16 threads are used to process each block
    const uint blocks_per_wg = gl_WorkGroupSize.x/16;
    const uint tid = gl_LocalInvocationID.x;
    const uint itid = tid % 16; // 0...15
    const uint ix = tid / 16;

    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
            temp[j][i] = FLOAT_TYPE(0);
        }
    }

    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += blocks_per_wg)
        calc_superblock(a_offset, b_offset, itid, i, num_blocks_per_row, first_row, num_rows);

    reduce_result(temp, d_offset, first_row, num_rows, tid);
}

void main() {
    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);

    init_iq_shmem(gl_WorkGroupSize);

    // do NUM_ROWS at a time, unless there aren't enough remaining rows
    if (first_row + NUM_ROWS <= p.stride_d) {
        compute_outputs(first_row, NUM_ROWS);
    } else {
        if (first_row >= p.stride_d) {
            return;
        }
        compute_outputs(first_row, p.stride_d - first_row);
    }
}
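
One detail of the IQ2_XS kernel worth calling out: each 16-bit qs entry stores a 9-bit grid index in its low bits and only 7 explicit sign bits, and the shader recovers the 8th sign from the parity of those 7 (the `sign7 = bitCount(sign)` / `sign7 & 1` pair above). A hedged C++ sketch of that unpacking, with illustrative helper names:

```cpp
#include <cstdint>
#include <bit>   // std::popcount (C++20)

// Illustrative helpers, not ggml API. A 16-bit IQ2_XS qs entry holds a 9-bit
// grid index (low bits) and 7 sign bits (bits 9..15); the 8th sign is implied.

// Expand the 7 stored sign bits into 8 sign flags (bit k set => negate weight k).
inline uint32_t iq2xs_expand_signs(uint16_t qs_entry) {
    const uint32_t sign7  = qs_entry >> 9;                  // explicit signs for weights 0..6
    const uint32_t parity = std::popcount(sign7) & 1u;      // shader: bitCount(sign) & 1
    return sign7 | (parity << 7);                           // weight 7 gets the parity bit
}

// Grid index, matching iq2xs_grid[qs & 511] in the shader.
inline uint32_t iq2xs_grid_index(uint16_t qs_entry) {
    return qs_entry & 511u;
}
```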
New file: mul_mat_vec shader for IQ2_XXS (filename not shown in this view)

@@ -0,0 +1,87 @@
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

#include "mul_mat_vec_base.comp"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];

void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
    const uint y_idx = i * QUANT_K + 16 * itid;
    const uint ib32 = itid / 2; // 0..7

    uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        const float d = float(data_a[ibi].d);
        const uint signscale = pack32(u16vec2(
            data_a_packed16[ibi].qs[4 * ib32 + 2],
            data_a_packed16[ibi].qs[4 * ib32 + 3]));
        const float db = d * 0.25 * (0.5 + (signscale >> 28));
        [[unroll]] for (uint l = 0; l < 2; ++l) {
            const uint qs = data_a[ibi].qs[8 * ib32 + 2 * (itid & 1) + l];
            const uint sign = bitfieldExtract(signscale, 7 * int(2 * (itid & 1) + l), 7);
            const uint sign7 = bitCount(sign);
            const vec4 grid0 = vec4(unpack8(iq2xxs_grid[qs].x));
            const vec4 grid1 = vec4(unpack8(iq2xxs_grid[qs].y));

            [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
                const vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]);
                const vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]);

                FLOAT_TYPE sum =
                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x),
                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y),
                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z),
                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w),
                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x),
                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y),
                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z),
                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign7 & 1) != 0 ? -grid1.w : grid1.w),
                    FLOAT_TYPE(0.0)))))))));
                temp[j][n] = fma(db, sum, temp[j][n]);
            }
        }
        ibi += num_blocks_per_row;
    }
}

void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;

    // 16 threads are used to process each block
    const uint blocks_per_wg = gl_WorkGroupSize.x/16;
    const uint tid = gl_LocalInvocationID.x;
    const uint itid = tid % 16; // 0...15
    const uint ix = tid / 16;

    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
            temp[j][i] = FLOAT_TYPE(0);
        }
    }

    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += blocks_per_wg)
        calc_superblock(a_offset, b_offset, itid, i, num_blocks_per_row, first_row, num_rows);

    reduce_result(temp, d_offset, first_row, num_rows, tid);
}

void main() {
    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);

    init_iq_shmem(gl_WorkGroupSize);

    // do NUM_ROWS at a time, unless there aren't enough remaining rows
    if (first_row + NUM_ROWS <= p.stride_d) {
        compute_outputs(first_row, NUM_ROWS);
    } else {
        if (first_row >= p.stride_d) {
            return;
        }
        compute_outputs(first_row, p.stride_d - first_row);
    }
}
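
The IQ2_XXS variant packs its per-sub-block metadata into the `signscale` word built above: two 16-bit halves of `qs` are joined into 32 bits whose top 4 bits carry the scale (hence db = d * 0.25 * (0.5 + (signscale >> 28))) and whose low 28 bits hold four 7-bit sign groups. A hedged C++ sketch of that unpacking, with illustrative names; as in IQ2_XS, the 8th sign of each group is the parity of its 7 stored bits.

```cpp
#include <cstdint>

// Illustrative unpacking of the IQ2_XXS "signscale" word, not ggml API.
struct iq2xxs_meta {
    float    db;        // d * 0.25 * (0.5 + scale), as in the shader
    uint32_t sign[4];   // 7-bit sign group per run of 8 weights (8th sign = parity)
};

inline iq2xxs_meta iq2xxs_unpack_meta(float d, uint16_t lo, uint16_t hi) {
    // pack32(u16vec2(lo, hi)) in the shader: lo occupies the low half-word.
    const uint32_t signscale = uint32_t(lo) | (uint32_t(hi) << 16);
    iq2xxs_meta m;
    m.db = d * 0.25f * (0.5f + float(signscale >> 28));     // top 4 bits: sub-block scale
    for (int g = 0; g < 4; ++g) {
        m.sign[g] = (signscale >> (7 * g)) & 0x7Fu;         // bitfieldExtract(signscale, 7*g, 7)
    }
    return m;
}
```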
New file: mul_mat_vec shader for IQ3_S (filename not shown in this view)

@@ -0,0 +1,90 @@
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

#include "mul_mat_vec_base.comp"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];

void calc_superblock(const uint a_offset, const uint b_offset, const uint ib32, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
    const uint y_idx = i * QUANT_K + 32 * ib32;

    uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        const float d = float(data_a[ibi].d);
        const uint scale = (data_a[ibi].scales[ib32/2] >> (4 * (ib32 & 1))) & 0xF;
        const float dscale = d * (1 + 2 * scale);
        const uint qh = data_a[ibi].qh[ib32];
        FLOAT_TYPE sum[NUM_COLS];
        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
            sum[j] = 0.0;
        }
        [[unroll]] for (uint l = 0; l < 4; ++l) {
            const u8vec2 qs = unpack8(data_a_packed16[ibi].qs[4 * ib32 + l]);
            const uint sign = data_a[ibi].signs[4 * ib32 + l];
            const vec4 grid0 = vec4(unpack8(iq3s_grid[qs.x | ((qh << (8 - 2*l)) & 0x100)]));
            const vec4 grid1 = vec4(unpack8(iq3s_grid[qs.y | ((qh << (7 - 2*l)) & 0x100)]));

            [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
                const vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]);
                const vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]);

                sum[j] =
                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x),
                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y),
                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z),
                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w),
                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x),
                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y),
                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z),
                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign & 128) != 0 ? -grid1.w : grid1.w),
                    sum[j]))))))));
            }
        }
        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
            temp[j][n] = fma(dscale, sum[j], temp[j][n]);
        }
        ibi += num_blocks_per_row;
    }
}

void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;

    // 8 threads are used to process each block
    const uint blocks_per_wg = gl_WorkGroupSize.x/8;
    const uint tid = gl_LocalInvocationID.x;
    const uint itid = tid % 8; // 0...7
    const uint ix = tid / 8;

    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
            temp[j][i] = FLOAT_TYPE(0);
        }
    }

    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += blocks_per_wg)
        calc_superblock(a_offset, b_offset, itid, i, num_blocks_per_row, first_row, num_rows);

    reduce_result(temp, d_offset, first_row, num_rows, tid);
}

void main() {
    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);

    init_iq_shmem(gl_WorkGroupSize);

    // do NUM_ROWS at a time, unless there aren't enough remaining rows
    if (first_row + NUM_ROWS <= p.stride_d) {
        compute_outputs(first_row, NUM_ROWS);
    } else {
        if (first_row >= p.stride_d) {
            return;
        }
        compute_outputs(first_row, p.stride_d - first_row);
    }
}
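
For IQ3_S, each 8-bit qs byte is widened to a 9-bit grid index by one bit of qh, the sub-block scale is d * (1 + 2 * scale), and the signs come from an explicit sign byte per 8 weights. Below is a hedged scalar sketch of one 8-weight step in C++; the grid table and signature are assumptions, and the scale is folded in per step here whereas the shader applies it once per 32-weight group (mathematically equivalent).

```cpp
#include <cstdint>

// Assumed lookup table standing in for ggml's iq3s_grid: one entry per 9-bit
// index, four packed unsigned byte magnitudes per entry.
extern const uint32_t iq3s_grid_ref[512];

// Dot product of one 8-weight step (loop index l in the shader) against 8 activations y.
float iq3s_step_dot(float d, uint32_t scale4,      // 4-bit sub-block scale
                    uint8_t qs_lo, uint8_t qs_hi,  // the two qs bytes read for this step
                    uint32_t qh, uint32_t l,       // qh supplies the 9th index bit per byte
                    uint8_t sign,                  // sign byte covering these 8 weights
                    const float y[8]) {
    const float dscale = d * (1.0f + 2.0f * float(scale4));
    const uint32_t idx0 = qs_lo | ((qh << (8 - 2*l)) & 0x100);   // as iq3s_grid[qs.x | ...]
    const uint32_t idx1 = qs_hi | ((qh << (7 - 2*l)) & 0x100);   // as iq3s_grid[qs.y | ...]
    const uint32_t grids[2] = { iq3s_grid_ref[idx0], iq3s_grid_ref[idx1] };
    float sum = 0.0f;
    for (uint32_t k = 0; k < 8; ++k) {
        const float g = float((grids[k / 4] >> (8 * (k % 4))) & 0xFFu);
        sum += y[k] * (((sign >> k) & 1) ? -g : g);
    }
    return dscale * sum;   // the shader applies dscale once per 32-weight group instead
}
```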
