Commit 9154002
[ET-VK] Use performant tiled algorithm for 4 bit weight only quantized linear (#10236)
## Context

Update the default compute shader for weight-only quantized int4 linear to use a tiled algorithm, which should boost performance for `gemm` cases, i.e. where `mat1` is a matrix.

## Changes

* Renamed `q_4w_linear` to `q_4w_linear_tiled`
* Updated the compute shader to use a tiled algorithm, using a value of 3 for `TILE_ROWS`; I expect to add variants which switch between different output tile configurations.

Differential Revision: [D73044649](https://our.internmc.facebook.com/intern/diff/D73044649/)
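As a minimal sketch of the dispatch arithmetic this change implies (assuming width-packed texels of 4 scalars, as the shader does): each thread now produces 2 output texels along the width axis and `TILE_ROWS` rows along the height axis. The helper names here (`div_up`, `Extents`, `tiled_global_wg_size`) are illustrative stand-ins, not part of the ExecuTorch API.

```cpp
#include <cstdint>

// Illustrative stand-in for utils::div_up: ceiling division.
constexpr uint32_t div_up(uint32_t n, uint32_t d) {
  return (n + d - 1) / d;
}

struct Extents {
  uint32_t x, y, z;
};

// For an output matrix with N columns and M rows, compute how many threads
// the tiled shader needs: each thread covers 2 width texels (8 scalars)
// and TILE_ROWS rows.
Extents tiled_global_wg_size(uint32_t N, uint32_t M, uint32_t tile_rows = 3) {
  const uint32_t width_texels = div_up(N, 4); // width-packed, 4 scalars/texel
  return Extents{
      div_up(width_texels, 2), // 2 output texels per thread along width
      div_up(M, tile_rows),    // TILE_ROWS output rows per thread
      1u};
}
```

For example, a 128 x 4096 output would dispatch {512, 43, 1} threads, versus {512, 128, 1} for a one-row-per-thread shader; this mirrors the `utils::div_up` calls in the QuantizedLinearGroupwiseInt4.cpp diff below.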
1 parent e6c7b30 commit 9154002

File tree: 4 files changed, +172 −6 lines changed

backends/vulkan/runtime/graph/ops/glsl/pack_int4_linear_weight_transposed_interleaved.yaml (+1 −1)

@@ -13,4 +13,4 @@ pack_int4_linear_weight_transposed_interleaved:
     - NAME: pack_int4_linear_weight_transposed_interleaved_buffer
       STORAGE: buffer
     - NAME: pack_int4_linear_weight_transposed_interleaved_nobitw8buffer_texture2d
-      NO_INT8_BUFFERS: true
+      NO_INT8_BUFFERS: true
backends/vulkan/runtime/graph/ops/glsl/q_4w_linear_tiled.glsl (new file, +161)
@@ -0,0 +1,161 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#version 450 core

#define PRECISION ${PRECISION}

#define T ${buffer_scalar_type(DTYPE)}
#define VEC4_T ${buffer_gvec_type(DTYPE, 4)}

#define TILE_ROWS ${TILE_ROWS}

${define_required_extensions(DTYPE)}
$if WEIGHT_STORAGE == "buffer":
  ${define_required_extensions("uint8")}

#extension GL_EXT_control_flow_attributes : require

layout(std430) buffer;

${layout_declare_tensor(B, "w", "t_out", DTYPE, OUT_STORAGE, is_scalar_array=False)}
${layout_declare_tensor(B, "r", "t_mat1", DTYPE, IN_STORAGE, is_scalar_array=False)}
${layout_declare_tensor(B, "r", "t_qmat2", "uint8", WEIGHT_STORAGE, is_scalar_array=False)}
${layout_declare_tensor(B, "r", "t_qparams", DTYPE, "buffer", is_scalar_array=False)}

layout(push_constant) uniform restrict Block {
  ivec4 out_sizes;
  ivec4 mat1_sizes;
  ivec4 qmat2_sizes;
};

layout(local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;

layout(constant_id = 3) const int group_size = 64;

/*
 * This shader computes a linear operator between a floating point input matrix
 * x and a weights matrix that is quantized to 4 bits.
 *
 * The (W, H, C) shape of each tensor is:
 * - x: (K, M)
 * - weights: (N / 2, K)
 *   - The weights tensor has a data type of `uint8`. Each element in the
 *     tensor contains 2 4-bit values packed into a uint8.
 *   - See the pack_int4_linear_weight_transposed_interleaved shader to see
 *     more details on how the weight tensor is stored.
 * - qparams: (2, N, number_of_groups)
 *   - This tensor contains the scales and zeros quantization parameters for
 *     the weights tensor. The weight tensor is quantized group-wise, which
 *     means that every `group_size` elements along the K dimension of the
 *     weights tensor has independent quantization parameters. Along the width
 *     dim, the first value contains the scale for the group and the second
 *     value contains the zero point for the group.
 *
 * Each thread computes a tile of TILE_ROWS * 2 texels of the output tensor.
 *
 * Note that this shader assumes that all tensors are width packed.
 */
void main() {
  const uint out_row = gl_GlobalInvocationID.y * TILE_ROWS;
  // Each thread writes out 2 texels along the width axis, equivalent to 8
  // scalar elements. Therefore multiply the thread_idx.x by 8.
  const uint out_col = gl_GlobalInvocationID.x << 3;
  // Similar reasoning to the above, each thread works on 2 texels along the
  // width axis so multiply thread_idx.x by 2.
  const int out_col_texel_idx = int(gl_GlobalInvocationID.x) << 1;

  if (out_col >= out_sizes.x || out_row >= out_sizes.y) {
    return;
  }

  const int num_blocks = mat1_sizes.x / group_size;

  VEC4_T mat1[TILE_ROWS];
  VEC4_T qmat2[4][2];
  VEC4_T sums[TILE_ROWS][2];

  [[unroll]] for (int r = 0; r < TILE_ROWS; ++r) {
    sums[r][0] = VEC4_T(0);
    sums[r][1] = VEC4_T(0);
  }

  VEC4_T scales[2];
  VEC4_T zeros[2];

  $if WEIGHT_STORAGE == "buffer":
    const int qmat2_stride = qmat2_sizes.x >> 2;
  $if PARAMS_STORAGE == "buffer":
    const int qparams_y_stride = out_sizes.x >> 2;
    const int qparams_z_stride = qparams_y_stride * 2;

  for (int block_idx = 0; block_idx < num_blocks; ++block_idx) {
    $if PARAMS_STORAGE == "buffer":
      scales[0] = t_qparams[block_idx * qparams_z_stride + out_col_texel_idx];
      zeros[0] = t_qparams[block_idx * qparams_z_stride + out_col_texel_idx + qparams_y_stride];

      scales[1] = t_qparams[block_idx * qparams_z_stride + out_col_texel_idx + 1];
      zeros[1] = t_qparams[block_idx * qparams_z_stride + out_col_texel_idx + 1 + qparams_y_stride];
    $else:
      scales[0] = texelFetch(t_qparams, ivec3(out_col_texel_idx, 0, block_idx), 0);
      zeros[0] = texelFetch(t_qparams, ivec3(out_col_texel_idx, 1, block_idx), 0);

      scales[1] = texelFetch(t_qparams, ivec3(out_col_texel_idx + 1, 0, block_idx), 0);
      zeros[1] = texelFetch(t_qparams, ivec3(out_col_texel_idx + 1, 1, block_idx), 0);

    for (int g_idx = 0; g_idx < group_size; g_idx += 4) {
      const int k = block_idx * group_size + g_idx;

      // Preload B
      [[unroll]] for (int r = 0; r < 4; ++r) {
        $if WEIGHT_STORAGE == "buffer":
          const u8vec4 packed_weight_tex = t_qmat2[(k + r) * qmat2_stride + gl_GlobalInvocationID.x];
        $else:
          const uvec4 packed_weight_tex = texelFetch(
              t_qmat2,
              ivec2(gl_GlobalInvocationID.x, k + r),
              0);

        qmat2[r][0] = (VEC4_T((packed_weight_tex & 0xF0) >> 4) - 8.0) * scales[0] + zeros[0];
        qmat2[r][1] = (VEC4_T(packed_weight_tex & 0x0F) - 8.0) * scales[1] + zeros[1];
      }

      // Preload A
      [[unroll]] for (int r = 0; r < TILE_ROWS; ++r) {
        $if IN_STORAGE == "buffer":
          mat1[r] = t_mat1[((out_row + r) * mat1_sizes.x + k) >> 2];
        $else:
          mat1[r] = texelFetch(t_mat1, ivec3(k >> 2, out_row + r, 0), 0);
      }

      // Accumulate output tile
      [[unroll]] for (int r = 0; r < TILE_ROWS; ++r) {
        sums[r][0] += mat1[r].x * qmat2[0][0]
                    + mat1[r].y * qmat2[1][0]
                    + mat1[r].z * qmat2[2][0]
                    + mat1[r].w * qmat2[3][0];

        sums[r][1] += mat1[r].x * qmat2[0][1]
                    + mat1[r].y * qmat2[1][1]
                    + mat1[r].z * qmat2[2][1]
                    + mat1[r].w * qmat2[3][1];
      }
    }
  }

  [[unroll]] for (int r = 0; r < TILE_ROWS; ++r) {
    $if OUT_STORAGE == "buffer":
      if (out_row + r < out_sizes.y) {
        t_out[((out_row + r) * out_sizes.x + out_col) >> 2] = sums[r][0];
        t_out[((out_row + r) * out_sizes.x + out_col + 4) >> 2] = sums[r][1];
      }
    $else:
      imageStore(t_out, ivec3(out_col_texel_idx, out_row + r, 0), sums[r][0]);
      imageStore(t_out, ivec3(out_col_texel_idx + 1, out_row + r, 0), sums[r][1]);
  }
}
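As a scalar reference for the weight unpacking above: each `uint8` in `t_qmat2` holds two 4-bit values, and the shader recenters each unsigned nibble by subtracting 8 before applying the group's scale and zero point. The following sketch mirrors that arithmetic; `unpack_nibbles` and `Dequantized` are illustrative names, not part of the source.

```cpp
#include <cstdint>

struct Dequantized {
  float hi; // from the high nibble, feeds the first output texel
  float lo; // from the low nibble, feeds the second output texel
};

// Dequantize one packed weight byte, matching the shader's
// (nibble - 8) * scale + zero computation per nibble.
Dequantized unpack_nibbles(
    uint8_t packed,
    float scale_hi, float zero_hi,
    float scale_lo, float zero_lo) {
  const int hi = (packed & 0xF0) >> 4;
  const int lo = packed & 0x0F;
  return Dequantized{
      (static_cast<float>(hi) - 8.0f) * scale_hi + zero_hi,
      (static_cast<float>(lo) - 8.0f) * scale_lo + zero_lo};
}
```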

backends/vulkan/runtime/graph/ops/glsl/q_4w_linear.yaml renamed to backends/vulkan/runtime/graph/ops/glsl/q_4w_linear_tiled.yaml (+5 −4)

@@ -4,19 +4,20 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-q_4w_linear:
+q_4w_linear_tiled:
   parameter_names_with_default_values:
     DTYPE: float
     OUT_STORAGE: texture3d
     IN_STORAGE: texture3d
     WEIGHT_STORAGE: texture2d
     PARAMS_STORAGE: buffer
+    TILE_ROWS: 3
   shader_variants:
-    - NAME: q_4w_linear_texture3d_texture3d_texture2d_float
-    - NAME: q_4w_linear_buffer_buffer_texture2d_float
+    - NAME: q_4w_linear_tiled_texture3d_texture3d_texture2d_float
+    - NAME: q_4w_linear_tiled_buffer_buffer_texture2d_float
       OUT_STORAGE: buffer
       IN_STORAGE: buffer
-    - NAME: q_4w_linear_buffer_buffer_buffer_float
+    - NAME: q_4w_linear_tiled_buffer_buffer_buffer_float
       OUT_STORAGE: buffer
       IN_STORAGE: buffer
       WEIGHT_STORAGE: buffer

backends/vulkan/runtime/graph/ops/impl/QuantizedLinearGroupwiseInt4.cpp (+5 −1)

@@ -146,6 +146,8 @@ void add_q_4w_linear_node(
   std::string kernel_name = "q_4w_linear";
   if (use_coop_algorithm) {
     kernel_name += "_coop";
+  } else {
+    kernel_name += "_tiled";
   }
   add_storage_type_suffix(kernel_name, graph.storage_type_of(out));
   add_storage_type_suffix(kernel_name, graph.storage_type_of(mat1));
@@ -154,10 +156,12 @@ void add_q_4w_linear_node(
 
   utils::uvec3 global_wg_size = graph.logical_limits_of(out);
   global_wg_size[0] = utils::div_up(global_wg_size[0], uint32_t(2));
-
   utils::uvec3 local_wg_size = graph.create_local_wg_size(global_wg_size);
+
   if (use_coop_algorithm) {
     local_wg_size = {8, 1, 8};
+  } else {
+    global_wg_size[1] = utils::div_up(global_wg_size[1], uint32_t(3));
   }
 
   graph.execute_nodes().emplace_back(new DispatchNode(
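To make the name construction above concrete, here is a sketch of how the final shader variant name is assembled. The free function and explicit suffix parameters are illustrative, standing in for the `add_storage_type_suffix` calls (and the dtype suffixing that follows outside this hunk); the resulting strings match the `shader_variants` declared in q_4w_linear_tiled.yaml.

```cpp
#include <string>

// Assemble "q_4w_linear" + algorithm suffix + storage/dtype suffixes,
// mirroring the selection logic in add_q_4w_linear_node.
std::string q_4w_linear_kernel_name(
    bool use_coop_algorithm,
    const std::string& out_storage,    // e.g. "texture3d" or "buffer"
    const std::string& in_storage,
    const std::string& weight_storage, // e.g. "texture2d" or "buffer"
    const std::string& dtype) {        // e.g. "float"
  std::string name = "q_4w_linear";
  name += use_coop_algorithm ? "_coop" : "_tiled";
  name += "_" + out_storage;
  name += "_" + in_storage;
  name += "_" + weight_storage;
  name += "_" + dtype;
  return name;
}

// q_4w_linear_kernel_name(false, "texture3d", "texture3d", "texture2d", "float")
// yields "q_4w_linear_tiled_texture3d_texture3d_texture2d_float",
// the first variant listed in the yaml.
```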
