@@ -4998,7 +4998,16 @@ static __global__ void rope_neox(
     const int ib = col / n_dims;
     const int ic = col % n_dims;

-    const int i = row*ncols + ib*n_dims + ic/2;
+    if (ib > 0) {
+        const int i = row*ncols + ib*n_dims + ic;
+
+        dst[i + 0] = x[i + 0];
+        dst[i + 1] = x[i + 1];
+
+        return;
+    }
+
+    const int i = row*ncols + ib*n_dims + ic/2;
     const int i2 = row/p_delta_rows;

     float cur_rot = inv_ndims * ic - ib;
@@ -7057,6 +7066,7 @@ inline void ggml_cuda_op_upscale(

     (void) src1;
     (void) dst;
+    (void) src1_dd;
 }

 inline void ggml_cuda_op_pad(
@@ -7073,6 +7083,7 @@ inline void ggml_cuda_op_pad(

     (void) src1;
     (void) dst;
+    (void) src1_dd;
 }

 inline void ggml_cuda_op_rms_norm(
@@ -7376,7 +7387,7 @@ inline void ggml_cuda_op_mul_mat_cublas(

     const int compute_capability = g_compute_capabilities[id];

-    if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) {
+    if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) {
         // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32
         half * src0_as_f16 = nullptr;
         size_t src0_as = 0;
@@ -8300,27 +8311,27 @@ static void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor
 }

 static __global__ void k_compute_batched_ptrs(
-        const half * src0_as_f16, const half * src1_as_f16, half * dst_f16,
+        const half * src0_as_f16, const half * src1_as_f16, char * dst,
         const void ** ptrs_src, void ** ptrs_dst,
-        int ne12, int ne13,
-        int ne23,
-        int nb02, int nb03,
-        int nb12, int nb13,
-        int nb2, int nb3,
-        int r2, int r3) {
-    int i13 = blockIdx.x * blockDim.x + threadIdx.x;
-    int i12 = blockIdx.y * blockDim.y + threadIdx.y;
+        int64_t ne12, int64_t ne13,
+        int64_t ne23,
+        size_t  nb02, size_t  nb03,
+        size_t  nb12, size_t  nb13,
+        size_t  nbd2, size_t  nbd3,
+        int64_t r2,   int64_t r3) {
+    int64_t i13 = blockIdx.x * blockDim.x + threadIdx.x;
+    int64_t i12 = blockIdx.y * blockDim.y + threadIdx.y;

     if (i13 >= ne13 || i12 >= ne12) {
         return;
     }

-    int i03 = i13 / r3;
-    int i02 = i12 / r2;
+    int64_t i03 = i13 / r3;
+    int64_t i02 = i12 / r2;

     ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03;
     ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12/2 + i13*nb13/2;
-    ptrs_dst[0*ne23 + i12 + i13*ne12] = (      char *)     dst_f16 + i12* nb2/2 + i13* nb3/2;
+    ptrs_dst[0*ne23 + i12 + i13*ne12] = (      char *)         dst + i12*nbd2   + i13*nbd3;
 }

 static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -8376,7 +8387,41 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const
     to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream);

     size_t dst_as = 0;
-    half * dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as);
+
+    half * dst_f16 = nullptr;
+    char * dst_t   = nullptr;
+
+    cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F;
+    cudaDataType_t      cu_data_type    = CUDA_R_16F;
+
+    // dst strides
+    size_t nbd2 = dst->nb[2];
+    size_t nbd3 = dst->nb[3];
+
+    const half  alpha_f16 = 1.0f;
+    const half  beta_f16  = 0.0f;
+
+    const float alpha_f32 = 1.0f;
+    const float beta_f32  = 0.0f;
+
+    const void * alpha = &alpha_f16;
+    const void * beta  = &beta_f16;
+
+    if (dst->op_params[0] == GGML_PREC_DEFAULT) {
+        dst_f16 = (half *) ggml_cuda_pool_malloc(ne * sizeof(half), &dst_as);
+        dst_t   = (char *) dst_f16;
+
+        nbd2 /= sizeof(float) / sizeof(half);
+        nbd3 /= sizeof(float) / sizeof(half);
+    } else {
+        dst_t = (char *) dst_ddf;
+
+        cu_compute_type = CUBLAS_COMPUTE_32F;
+        cu_data_type    = CUDA_R_32F;
+
+        alpha = &alpha_f32;
+        beta  = &beta_f32;
+    }

     GGML_ASSERT(ne12 % ne02 == 0);
     GGML_ASSERT(ne13 % ne03 == 0);
@@ -8385,9 +8430,6 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const
     const int64_t r2 = ne12/ne02;
     const int64_t r3 = ne13/ne03;

-    const half alpha_f16 = 1.0f;
-    const half beta_f16  = 0.0f;
-
 #if 0
     // use cublasGemmEx
     {
@@ -8397,12 +8439,12 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const
                 int i02 = i12 / r2;

                 CUBLAS_CHECK(
-                        cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
+                        cublasGemmEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
                             ne01, ne11, ne10,
-                            &alpha_f16, (const char *) src0_as_f16 + i02*src0->nb[2]   + i03*src0->nb[3]  , CUDA_R_16F,   nb01/sizeof(half),
-                                        (const char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2, CUDA_R_16F,   nb11/sizeof(float),
-                            &beta_f16,  (      char *)     dst_f16 + i12* dst->nb[2]/2 + i13* dst->nb[3]/2, CUDA_R_16F,   ne01,
-                            CUBLAS_COMPUTE_16F,
+                            alpha, (const char *) src0_as_f16 + i02*src0->nb[2]   + i03*src0->nb[3]  , CUDA_R_16F,   nb01/sizeof(half),
+                                   (const char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2, CUDA_R_16F,   nb11/sizeof(float),
+                            beta,  (      char *)       dst_t + i12*nbd2          + i13*nbd3,          cu_data_type, ne01,
+                            cu_compute_type,
                             CUBLAS_GEMM_DEFAULT_TENSOR_OP));
             }
         }
@@ -8414,11 +8456,11 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const
         CUBLAS_CHECK(
         cublasGemmStridedBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
                 ne01, ne11, ne10,
-                &alpha_f16, (const char *) src0_as_f16, CUDA_R_16F,   nb01/sizeof(half),  src0->nb[2]/sizeof(half),  // strideA
-                            (const char *) src1_as_f16, CUDA_R_16F,   nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB
-                &beta_f16,  (      char *)     dst_f16, CUDA_R_16F,   ne01,                dst->nb[2]/sizeof(float), // strideC
+                alpha, (const char *) src0_as_f16, CUDA_R_16F,   nb01/sizeof(half),  src0->nb[2]/sizeof(half),  // strideA
+                       (const char *) src1_as_f16, CUDA_R_16F,   nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB
+                beta,  (      char *)       dst_t, cu_data_type, ne01,                dst->nb[2]/sizeof(float), // strideC
                 ne12*ne13,
-                CUBLAS_COMPUTE_16F,
+                cu_compute_type,
                 CUBLAS_GEMM_DEFAULT_TENSOR_OP));
     } else {
         // use cublasGemmBatchedEx
@@ -8435,24 +8477,24 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const

         dim3 block_dims(ne13, ne12);
         k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>(
-                src0_as_f16, src1_as_f16, dst_f16,
+                src0_as_f16, src1_as_f16, dst_t,
                 ptrs_src, ptrs_dst,
                 ne12, ne13,
                 ne23,
                 nb02, nb03,
                 nb12, nb13,
-                dst->nb[2], dst->nb[3],
+                nbd2, nbd3,
                 r2, r3);
         CUDA_CHECK(cudaGetLastError());

         CUBLAS_CHECK(
         cublasGemmBatchedEx(g_cublas_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N,
                 ne01, ne11, ne10,
-                &alpha_f16, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F,   nb01/sizeof(half),
-                            (const void **) (ptrs_src + 1*ne23), CUDA_R_16F,   nb11/sizeof(float),
-                &beta_f16,  (      void **) (ptrs_dst + 0*ne23), CUDA_R_16F,   ne01,
+                alpha, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F,   nb01/sizeof(half),
+                       (const void **) (ptrs_src + 1*ne23), CUDA_R_16F,   nb11/sizeof(float),
+                beta,  (      void **) (ptrs_dst + 0*ne23), cu_data_type, ne01,
                 ne23,
-                CUBLAS_COMPUTE_16F,
+                cu_compute_type,
                 CUBLAS_GEMM_DEFAULT_TENSOR_OP));

         if (ptrs_src_s != 0) {
@@ -8464,11 +8506,14 @@ static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const
     }
 #endif

-    const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
-    to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream);
+    if (dst->op_params[0] == GGML_PREC_DEFAULT) {
+        const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
+        to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream);
+
+        ggml_cuda_pool_free(dst_f16, dst_as);
+    }

     ggml_cuda_pool_free(src1_as_f16, src1_as);
-    ggml_cuda_pool_free(dst_f16, dst_as);
 }

 static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
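For reference, a minimal standalone sketch (not part of the patch) of the precision dispatch that ggml_cuda_mul_mat_mat_batched_cublas performs in the hunks above; it assumes GGML_PREC_DEFAULT == 0 as in ggml.h, and the helper name select_mul_mat_prec is hypothetical:

    #include <cublas_v2.h>
    #include <cuda_fp16.h>

    // Bundles the cuBLAS types chosen for the batched matmul output.
    struct mul_mat_prec {
        cublasComputeType_t compute_type;  // accumulation precision
        cudaDataType_t      data_type;     // element type of the C (dst) matrix
        size_t              dst_elt_size;  // bytes per dst element cuBLAS writes
    };

    // Mirrors the patch: FP16 by default (dst is converted to FP32 afterwards),
    // FP32 when the op requests higher precision and cuBLAS writes into dst directly.
    static mul_mat_prec select_mul_mat_prec(int op_prec /* dst->op_params[0] */) {
        if (op_prec == 0 /* GGML_PREC_DEFAULT, assumed */) {
            return { CUBLAS_COMPUTE_16F, CUDA_R_16F, sizeof(half) };
        }
        return { CUBLAS_COMPUTE_32F, CUDA_R_32F, sizeof(float) };
    }

In the FP32 branch the destination strides are left as the FP32 strides of dst, whereas the FP16 branch halves them (nbd2, nbd3), which is why k_compute_batched_ptrs now takes the strides as explicit size_t parameters instead of reading dst->nb directly.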