@@ -366,7 +366,7 @@ ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer,
         if (padded_size > original_size && tensor->view_src == nullptr) {
             SYCL_CHECK(CHECK_TRY_ERROR(ctx->stream->memset(
                 (char *)tensor->data + original_size, 0,
-                padded_size - original_size).wait()));
+                padded_size - original_size)));
         }
     }
     return GGML_STATUS_SUCCESS;
@@ -500,7 +500,7 @@ static void ggml_backend_sycl_buffer_clear(ggml_backend_buffer_t buffer,
 
     SYCL_CHECK(CHECK_TRY_ERROR((*stream)
                                    .memset(ctx->dev_ptr, value, buffer->size)
-                                   .wait()));
+                                   ));
 }
 catch (sycl::exception const &exc) {
   std::cerr << exc.what() << "Exception caught at file:" << __FILE__
@@ -522,7 +522,6 @@ static void ggml_backend_sycl_buffer_memset_tensor(ggml_backend_buffer_t buffer,
     }
     void * target_ptr = static_cast<char *>(tensor->data) + offset;
     SYCL_CHECK(CHECK_TRY_ERROR((*stream).memset(target_ptr, value, size)));
-    SYCL_CHECK(CHECK_TRY_ERROR((*stream).wait()));
 }
 
 static void ggml_backend_sycl_buffer_reset(ggml_backend_buffer_t buffer) {
@@ -844,7 +843,7 @@ ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer,
             SYCL_CHECK(CHECK_TRY_ERROR(
                 (*stream)
                     .memset(buf + original_size, 0, size - original_size)
-                    .wait()));
+                    ));
         }
 
         extra->data_device[i] = buf;
@@ -912,7 +911,7 @@ ggml_backend_sycl_split_buffer_set_tensor(ggml_backend_buffer_t buffer,
         SYCL_CHECK(CHECK_TRY_ERROR(
             (*stream)
                 .memcpy(extra->data_device[i], buf_host, original_size)
-                .wait()));
+                ));
     }
 }
 catch (sycl::exception const &exc) {
@@ -965,7 +964,7 @@ ggml_backend_sycl_split_buffer_get_tensor(ggml_backend_buffer_t buffer,
         SYCL_CHECK(CHECK_TRY_ERROR(
             (*stream)
                 .memcpy(buf_host, extra->data_device[i], original_size)
-                .wait()));
+                ));
     }
 }
 catch (sycl::exception const &exc) {
@@ -2505,7 +2504,7 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
                         SYCL_CHECK(CHECK_TRY_ERROR(stream->memcpy(
                             src1_ddq_i, src1_ddq_i_source,
                             src1_ncols * src1_padded_col_size * q8_1_ts /
-                                q8_1_bs).wait()));
+                                q8_1_bs)));
                     } else {
 
                         float * src1_ddf_i_source = (float *) src1_extra->data_device[ctx.device];
@@ -2572,7 +2571,7 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten
                     dhf_dst_i += src1_col_0*ne0;
                     SYCL_CHECK(CHECK_TRY_ERROR(
                         stream->memcpy(dhf_dst_i, dst_dd_i,
-                                       src1_ncols * ne0 * sizeof(float)).wait()));
+                                       src1_ncols * ne0 * sizeof(float))));
                 }
             }
 
@@ -3741,7 +3740,7 @@ static void ggml_backend_sycl_get_tensor_async(ggml_backend_t backend,
     GGML_ASSERT(buf->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) && "unsupported buffer type");
     const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
     SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
-        data, (const char *)tensor->data + offset, size).wait()));
+        data, (const char *)tensor->data + offset, size)));
 }
 catch (sycl::exception const &exc) {
   std::cerr << exc.what() << "Exception caught at file:" << __FILE__
@@ -3761,7 +3760,7 @@ static bool ggml_backend_sycl_cpy_tensor_async(ggml_backend_t backend,
         */
         const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
         SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
-            dst->data, src->data, ggml_nbytes(dst)).wait()));
+            dst->data, src->data, ggml_nbytes(dst))));
         return true;
     }
 
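
The common thread in these hunks is dropping the intermediate .wait() after device memset/memcpy submissions so the host no longer blocks on every individual operation. Below is a minimal standalone sketch of that pattern, assuming an in-order sycl::queue (later submissions are ordered after earlier ones) and a single synchronization point before the host reads results. It illustrates the queue semantics the change relies on; it is not code from this patch, and all names in it are illustrative.

// Sketch only: in-order queue, so the memset/memcpy below execute in
// submission order without per-operation .wait() calls.
#include <sycl/sycl.hpp>
#include <cstdio>
#include <vector>

int main() {
    sycl::queue q{sycl::property::queue::in_order{}};  // in-order: ops run in submission order

    const size_t n = 256;
    char * dev = sycl::malloc_device<char>(n, q);
    std::vector<char> host(n, 0x7f);

    q.memset(dev, 0, n);                 // no .wait(): the next op is ordered after this one
    q.memcpy(dev, host.data(), n / 2);   // overwrites the first half after the memset completes
    q.memcpy(host.data(), dev, n);       // read everything back

    q.wait();  // single synchronization point before the host touches the result

    std::printf("host[0]=%d host[%zu]=%d\n", host[0], n - 1, host[n - 1]);  // prints 127 and 0

    sycl::free(dev, q);
    return 0;
}

With an out-of-order queue the same code would need explicit event dependencies (or per-operation waits), which is why the ordering guarantee of the queue matters when removing these waits.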