Skip to content

Commit 300907b

Browse files
authored
opencl: Fix rope and softmax (#11833)
* opencl: fix `ROPE`
* opencl: fix `SOFT_MAX`
* Add fp16 variant
* opencl: enforce subgroup size for `soft_max`
1 parent 94b87f8 commit 300907b

File tree

2 files changed

+164
-4
lines changed

2 files changed

+164
-4
lines changed

ggml/src/ggml-opencl/ggml-opencl.cpp

+26-4
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,7 @@ struct ggml_backend_opencl_context {
143143
cl_kernel kernel_rms_norm;
144144
cl_kernel kernel_diag_mask_inf, kernel_diag_mask_inf_8;
145145
cl_kernel kernel_soft_max, kernel_soft_max_4;
146+
cl_kernel kernel_soft_max_f16, kernel_soft_max_4_f16;
146147
cl_kernel kernel_get_rows_f32, kernel_get_rows_f16, kernel_get_rows_q4_0;
147148
cl_kernel kernel_rope_norm_f32, kernel_rope_norm_f16, kernel_rope_neox_f32, kernel_rope_neox_f16;
148149
cl_kernel kernel_cpy_f16_f16, kernel_cpy_f16_f32, kernel_cpy_f32_f16, kernel_cpy_f32_f32;
@@ -614,6 +615,8 @@ static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) {
614615
CL_CHECK((backend_ctx->kernel_diag_mask_inf_8 = clCreateKernel(backend_ctx->program, "kernel_diag_mask_inf_8", &err), err));
615616
CL_CHECK((backend_ctx->kernel_soft_max = clCreateKernel(backend_ctx->program, "kernel_soft_max", &err), err));
616617
CL_CHECK((backend_ctx->kernel_soft_max_4 = clCreateKernel(backend_ctx->program, "kernel_soft_max_4", &err), err));
618+
CL_CHECK((backend_ctx->kernel_soft_max_f16 = clCreateKernel(backend_ctx->program, "kernel_soft_max_f16", &err), err));
619+
CL_CHECK((backend_ctx->kernel_soft_max_4_f16 = clCreateKernel(backend_ctx->program, "kernel_soft_max_4_f16", &err), err));
617620
CL_CHECK((backend_ctx->kernel_rope_norm_f32 = clCreateKernel(backend_ctx->program, "kernel_rope_norm_f32", &err), err));
618621
CL_CHECK((backend_ctx->kernel_rope_norm_f16 = clCreateKernel(backend_ctx->program, "kernel_rope_norm_f16", &err), err));
619622
CL_CHECK((backend_ctx->kernel_rope_neox_f32 = clCreateKernel(backend_ctx->program, "kernel_rope_neox_f32", &err), err));
@@ -1044,8 +1047,16 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
10441047
return true;
10451048
case GGML_OP_DIAG_MASK_INF:
10461049
return op->ne[3] == 1;
1047-
case GGML_OP_ROPE:
1050+
case GGML_OP_ROPE: {
1051+
const int mode = ((const int32_t *) op->op_params)[2];
1052+
if (mode & GGML_ROPE_TYPE_MROPE) {
1053+
return false;
1054+
}
1055+
if (mode & GGML_ROPE_TYPE_VISION) {
1056+
return false;
1057+
}
10481058
return true;
1059+
}
10491060
default:
10501061
return false;
10511062
}
@@ -3666,6 +3677,8 @@ static void ggml_cl_soft_max(ggml_backend_t backend, const ggml_tensor * src0, c
36663677
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
36673678
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
36683679

3680+
const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);
3681+
36693682
// Local size must be wave size. Each workgroup is a wave, working on a row,
36703683
// where a row corresponds to leading dimension.
36713684
int nth = MIN(32, ne00);
@@ -3683,9 +3696,17 @@ static void ggml_cl_soft_max(ggml_backend_t backend, const ggml_tensor * src0, c
36833696
cl_kernel kernel;
36843697

36853698
if (ne00%4 == 0) {
3686-
kernel = backend_ctx->kernel_soft_max_4;
3699+
if (use_f16) {
3700+
kernel = backend_ctx->kernel_soft_max_4_f16;
3701+
} else {
3702+
kernel = backend_ctx->kernel_soft_max_4;
3703+
}
36873704
} else {
3688-
kernel = backend_ctx->kernel_soft_max;
3705+
if (use_f16) {
3706+
kernel = backend_ctx->kernel_soft_max_f16;
3707+
} else {
3708+
kernel = backend_ctx->kernel_soft_max;
3709+
}
36893710
}
36903711

36913712
CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
@@ -3766,7 +3787,8 @@ static void ggml_cl_rope(ggml_backend_t backend, const ggml_tensor * src0, const
37663787
const int nb2 = dst ? dst->nb[2] : 0;
37673788
const int nb3 = dst ? dst->nb[3] : 0;
37683789

3769-
GGML_ASSERT(ne10 == ne02);
3790+
GGML_ASSERT(ne10 % ne02 == 0);
3791+
GGML_ASSERT(ne10 >= ne02);
37703792

37713793
int nth = MIN(64, ne00);
37723794

ggml/src/ggml-opencl/kernels/ggml-opencl.cl

+138
Original file line numberDiff line numberDiff line change
@@ -679,6 +679,9 @@ kernel void kernel_diag_mask_inf_8(
679679
//------------------------------------------------------------------------------
680680
// softmax
681681
//------------------------------------------------------------------------------
682+
#ifdef ADRENO_GPU
683+
REQD_SUBGROUP_SIZE_64
684+
#endif
682685
kernel void kernel_soft_max(
683686
global float * src0,
684687
ulong offset0,
@@ -811,6 +814,141 @@ kernel void kernel_soft_max_4(
811814
}
812815
}
813816

817+
#ifdef ADRENO_GPU
REQD_SUBGROUP_SIZE_64
#endif
// Softmax over the leading dimension (ne00) with an optional fp16 mask and
// ALiBi positional bias. One workgroup handles one row; the reductions below
// use subgroup ops, so the workgroup must be a single subgroup/wave (enforced
// on Adreno via REQD_SUBGROUP_SIZE_64 above — host sizing assumed to match,
// TODO confirm against ggml_cl_soft_max).
kernel void kernel_soft_max_f16(
    global float * src0,   // input logits, f32
    ulong offset0,         // byte offset into src0
    global half * src1,    // optional mask, f16 (host passes src0 here when there is no mask)
    ulong offset1,         // byte offset into src1
    global float * dst,    // output probabilities, f32
    ulong offsetd,         // byte offset into dst
    int ne00,              // row length (dimension reduced over)
    int ne01,              // number of rows
    int ne02,              // number of heads (used as ALiBi head index)
    float scale,           // scale applied to logits before max/exp
    float max_bias,        // > 0.0f enables the ALiBi slope
    float m0,              // ALiBi base for heads < n_head_log2
    float m1,              // ALiBi base for heads >= n_head_log2
    int n_head_log2
) {
    src0 = (global float *)((global char *)src0 + offset0);
    src1 = (global half  *)((global char *)src1 + offset1);
    dst  = (global float *)((global char *)dst  + offsetd);

    int i03 = get_group_id(2);
    int i02 = get_group_id(1);
    int i01 = get_group_id(0);

    global float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
    // "No mask" is signalled by src1 aliasing src0; the pointer comparison
    // leaves pmask null in that case so the ternaries below skip the mask.
    global half  * pmask = (global char *)src1 != (global char *)src0 ? src1 + i01*ne00 : 0;
    global float * pdst  = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    float slope = 1.0f;

    // ALiBi: per-head slope = base^exp, with a different base/exponent
    // progression for heads past n_head_log2.
    if (max_bias > 0.0f) {
        int h = i02;

        float base = h < n_head_log2 ? m0 : m1;
        int   exp  = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;

        slope = pow(base, exp);
    }

    // parallel max — softmax is shifted by the row max for numerical stability
    float lmax = -INFINITY;
    for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) {
        lmax = fmax(lmax, psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f));
    }
    float max = sub_group_reduce_max(lmax);

    // parallel sum of exp(x - max)
    float lsum = 0.0f;
    for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) {
        float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)) - max);
        lsum += exp_psrc0;
        // Remember the result of exp here. exp is expensive, so we really do not
        // wish to compute it twice.
        pdst[i00] = exp_psrc0;
    }

    const float sum = sub_group_reduce_add(lsum);

    // normalize the cached exponentials in place
    for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) {
        pdst[i00] /= sum;
    }
}
883+
884+
#ifdef ADRENO_GPU
REQD_SUBGROUP_SIZE_64
#endif
// 4-wide vectorized variant of the f16-mask softmax; the host selects this
// kernel only when ne00 % 4 == 0. Same algorithm: one workgroup (= one
// subgroup/wave, enforced on Adreno above) per row, max-shifted softmax with
// optional fp16 mask and ALiBi bias.
kernel void kernel_soft_max_4_f16(
    global float * src0,   // input logits, f32
    ulong offset0,         // byte offset into src0
    global half * src1,    // optional mask, f16 (host passes src0 here when there is no mask)
    ulong offset1,         // byte offset into src1
    global float * dst,    // output probabilities, f32
    ulong offsetd,         // byte offset into dst
    int ne00,              // row length (multiple of 4)
    int ne01,              // number of rows
    int ne02,              // number of heads (used as ALiBi head index)
    float scale,           // scale applied to logits before max/exp
    float max_bias,        // > 0.0f enables the ALiBi slope
    float m0,              // ALiBi base for heads < n_head_log2
    float m1,              // ALiBi base for heads >= n_head_log2
    int n_head_log2
) {
    src0 = (global float *)((global char *)src0 + offset0);
    src1 = (global half  *)((global char *)src1 + offset1);
    dst  = (global float *)((global char *)dst  + offsetd);

    int i03 = get_group_id(2);
    int i02 = get_group_id(1);
    int i01 = get_group_id(0);

    global float4 * psrc4 = (global float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
    // "No mask" is signalled by src1 aliasing src0 (pmask stays null then).
    global half4  * pmask = (global char *)src1 != (global char *)src0 ? (global half4 *)(src1 + i01*ne00) : 0;
    global float4 * pdst4 = (global float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);

    float slope = 1.0f;

    // ALiBi: per-head slope = base^exp (see scalar kernel for the progression)
    if (max_bias > 0.0f) {
        int h = i02;

        float base = h < n_head_log2 ? m0 : m1;
        int   exp  = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;

        slope = pow(base, exp);
    }

    // parallel max, 4 lanes at a time (the scalar -INFINITY broadcasts to all
    // four components), then folded to a scalar before the subgroup reduction
    float4 lmax4 = -INFINITY;
    for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) {
        lmax4 = fmax(lmax4, psrc4[i00]*scale + slope*(pmask ? convert_float4(pmask[i00]) : 0.0f));
    }
    float lmax = fmax(fmax(lmax4.s0, lmax4.s1), fmax(lmax4.s2, lmax4.s3));

    const float max = sub_group_reduce_max(lmax);

    // parallel sum of exp(x - max); the exponentials are cached into pdst4 so
    // they are not recomputed in the normalization pass
    float4 lsum4 = 0.0f;
    for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) {
        const float4 exp_psrc4 = exp((psrc4[i00]*scale + slope*(pmask ? convert_float4(pmask[i00]) : 0.0f)) - max);
        lsum4 += exp_psrc4;
        pdst4[i00] = exp_psrc4;
    }
    float lsum = lsum4.s0 + lsum4.s1 + lsum4.s2 + lsum4.s3;

    const float sum = sub_group_reduce_add(lsum);

    // normalize the cached exponentials in place
    for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) {
        pdst4[i00] /= sum;
    }
}
951+
814952
//------------------------------------------------------------------------------
815953
// kernel_rope
816954
//------------------------------------------------------------------------------

0 commit comments

Comments
 (0)