Skip to content

Commit 290fc47

Browse files
authored
Merge pull request #1711 from CEED/jeremy/shared-at-points
GPU Shared AtPoints Bases
2 parents 40b22b2 + a8d440f commit 290fc47

8 files changed: +1664 −54 lines changed

backends/cuda-shared/ceed-cuda-shared-basis.c

+85-27
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,
6464
if (dim == 1) {
6565
CeedInt elems_per_block = CeedIntMin(ceed_Cuda->device_prop.maxThreadsDim[2], CeedIntMax(512 / thread_1d,
6666
1)); // avoid >512 total threads
67-
CeedInt grid = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
67+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
6868
CeedInt shared_mem = elems_per_block * thread_1d * sizeof(CeedScalar);
6969

7070
if (t_mode == CEED_TRANSPOSE) {
@@ -77,7 +77,7 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,
7777
const CeedInt opt_elems[7] = {0, 32, 8, 6, 4, 2, 8};
7878
// elems_per_block must be at least 1
7979
CeedInt elems_per_block = CeedIntMax(thread_1d < 7 ? opt_elems[thread_1d] / num_comp : 1, 1);
80-
CeedInt grid = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
80+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
8181
CeedInt shared_mem = elems_per_block * thread_1d * thread_1d * sizeof(CeedScalar);
8282

8383
if (t_mode == CEED_TRANSPOSE) {
@@ -88,7 +88,7 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,
8888
}
8989
} else if (dim == 3) {
9090
CeedInt elems_per_block = 1;
91-
CeedInt grid = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
91+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
9292
CeedInt shared_mem = elems_per_block * thread_1d * thread_1d * sizeof(CeedScalar);
9393

9494
if (t_mode == CEED_TRANSPOSE) {
@@ -115,7 +115,7 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,
115115
if (dim == 1) {
116116
CeedInt elems_per_block = CeedIntMin(ceed_Cuda->device_prop.maxThreadsDim[2], CeedIntMax(512 / thread_1d,
117117
1)); // avoid >512 total threads
118-
CeedInt grid = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
118+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
119119
CeedInt shared_mem = elems_per_block * thread_1d * sizeof(CeedScalar);
120120

121121
if (t_mode == CEED_TRANSPOSE) {
@@ -128,7 +128,7 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,
128128
const CeedInt opt_elems[7] = {0, 32, 8, 6, 4, 2, 8};
129129
// elems_per_block must be at least 1
130130
CeedInt elems_per_block = CeedIntMax(thread_1d < 7 ? opt_elems[thread_1d] / num_comp : 1, 1);
131-
CeedInt grid = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
131+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
132132
CeedInt shared_mem = elems_per_block * thread_1d * thread_1d * sizeof(CeedScalar);
133133

134134
if (t_mode == CEED_TRANSPOSE) {
@@ -139,7 +139,7 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,
139139
}
140140
} else if (dim == 3) {
141141
CeedInt elems_per_block = 1;
142-
CeedInt grid = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
142+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
143143
CeedInt shared_mem = elems_per_block * thread_1d * thread_1d * sizeof(CeedScalar);
144144

145145
if (t_mode == CEED_TRANSPOSE) {
@@ -159,19 +159,19 @@ static int CeedBasisApplyTensorCore_Cuda_shared(CeedBasis basis, bool apply_add,
159159
void *weight_args[] = {(void *)&num_elem, (void *)&data->d_q_weight_1d, &d_v};
160160
if (dim == 1) {
161161
const CeedInt elems_per_block = block_size / Q_1d;
162-
const CeedInt grid_size = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
162+
const CeedInt grid_size = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
163163

164164
CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Weight, grid_size, Q_1d, elems_per_block, 1, weight_args));
165165
} else if (dim == 2) {
166166
const CeedInt opt_elems = block_size / (Q_1d * Q_1d);
167167
const CeedInt elems_per_block = opt_elems > 0 ? opt_elems : 1;
168-
const CeedInt grid_size = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
168+
const CeedInt grid_size = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
169169

170170
CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Weight, grid_size, Q_1d, Q_1d, elems_per_block, weight_args));
171171
} else if (dim == 3) {
172172
const CeedInt opt_elems = block_size / (Q_1d * Q_1d);
173173
const CeedInt elems_per_block = opt_elems > 0 ? opt_elems : 1;
174-
const CeedInt grid_size = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
174+
const CeedInt grid_size = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
175175

176176
CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Weight, grid_size, Q_1d, Q_1d, elems_per_block, weight_args));
177177
}
@@ -211,16 +211,17 @@ static int CeedBasisApplyAddTensor_Cuda_shared(CeedBasis basis, const CeedInt nu
211211
static int CeedBasisApplyAtPointsCore_Cuda_shared(CeedBasis basis, bool apply_add, const CeedInt num_elem, const CeedInt *num_points,
212212
CeedTransposeMode t_mode, CeedEvalMode eval_mode, CeedVector x_ref, CeedVector u, CeedVector v) {
213213
Ceed ceed;
214-
CeedInt Q_1d, dim, max_num_points = num_points[0];
215-
const CeedInt is_transpose = t_mode == CEED_TRANSPOSE;
216-
const int max_block_size = 32;
214+
Ceed_Cuda *ceed_Cuda;
215+
CeedInt Q_1d, dim, num_comp, max_num_points = num_points[0];
216+
const CeedInt is_transpose = t_mode == CEED_TRANSPOSE;
217217
const CeedScalar *d_x, *d_u;
218218
CeedScalar *d_v;
219219
CeedBasis_Cuda_shared *data;
220220

221221
CeedCallBackend(CeedBasisGetData(basis, &data));
222222
CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
223223
CeedCallBackend(CeedBasisGetDimension(basis, &dim));
224+
CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
224225

225226
// Weight handled separately
226227
if (eval_mode == CEED_EVAL_WEIGHT) {
@@ -229,14 +230,13 @@ static int CeedBasisApplyAtPointsCore_Cuda_shared(CeedBasis basis, bool apply_ad
229230
}
230231

231232
CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
233+
CeedCallBackend(CeedGetData(ceed, &ceed_Cuda));
232234

233235
// Check padded to uniform number of points per elem
234236
for (CeedInt i = 1; i < num_elem; i++) max_num_points = CeedIntMax(max_num_points, num_points[i]);
235237
{
236-
CeedInt num_comp, q_comp;
238+
CeedInt q_comp;
237239
CeedSize len, len_required;
238-
239-
CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
240240
CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, eval_mode, &q_comp));
241241
CeedCallBackend(CeedVectorGetLength(is_transpose ? u : v, &len));
242242
len_required = (CeedSize)num_comp * (CeedSize)q_comp * (CeedSize)num_elem * (CeedSize)max_num_points;
@@ -285,15 +285,14 @@ static int CeedBasisApplyAtPointsCore_Cuda_shared(CeedBasis basis, bool apply_ad
285285
}
286286

287287
// -- Compile kernels
288-
const char basis_kernel_source[] = "// AtPoints basis source\n#include <ceed/jit-source/cuda/cuda-ref-basis-tensor-at-points.h>\n";
288+
const char basis_kernel_source[] = "// AtPoints basis source\n#include <ceed/jit-source/cuda/cuda-shared-basis-tensor-at-points.h>\n";
289289
CeedInt num_comp;
290290

291291
if (data->moduleAtPoints) CeedCallCuda(ceed, cuModuleUnload(data->moduleAtPoints));
292292
CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
293-
CeedCallBackend(CeedCompile_Cuda(ceed, basis_kernel_source, &data->moduleAtPoints, 9, "BASIS_Q_1D", Q_1d, "BASIS_P_1D", P_1d, "BASIS_BUF_LEN",
294-
Q_1d * CeedIntPow(Q_1d > P_1d ? Q_1d : P_1d, dim - 1), "BASIS_DIM", dim, "BASIS_NUM_COMP", num_comp,
295-
"BASIS_NUM_NODES", CeedIntPow(P_1d, dim), "BASIS_NUM_QPTS", CeedIntPow(Q_1d, dim), "BASIS_NUM_PTS",
296-
max_num_points, "POINTS_BUFF_LEN", CeedIntPow(Q_1d, dim - 1)));
293+
CeedCallBackend(CeedCompile_Cuda(ceed, basis_kernel_source, &data->moduleAtPoints, 8, "BASIS_Q_1D", Q_1d, "BASIS_P_1D", P_1d, "T_1D",
294+
CeedIntMax(Q_1d, P_1d), "BASIS_DIM", dim, "BASIS_NUM_COMP", num_comp, "BASIS_NUM_NODES", CeedIntPow(P_1d, dim),
295+
"BASIS_NUM_QPTS", CeedIntPow(Q_1d, dim), "BASIS_NUM_PTS", max_num_points));
297296
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->moduleAtPoints, "InterpAtPoints", &data->InterpAtPoints));
298297
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->moduleAtPoints, "InterpTransposeAtPoints", &data->InterpTransposeAtPoints));
299298
CeedCallBackend(CeedGetKernel_Cuda(ceed, data->moduleAtPoints, "GradAtPoints", &data->GradAtPoints));
@@ -323,17 +322,76 @@ static int CeedBasisApplyAtPointsCore_Cuda_shared(CeedBasis basis, bool apply_ad
323322
// Basis action
324323
switch (eval_mode) {
325324
case CEED_EVAL_INTERP: {
326-
void *interp_args[] = {(void *)&num_elem, &data->d_chebyshev_interp_1d, &data->d_points_per_elem, &d_x, &d_u, &d_v};
327-
const CeedInt block_size = CeedIntMin(CeedIntPow(Q_1d, dim), max_block_size);
325+
CeedInt P_1d, Q_1d;
328326

329-
CeedCallBackend(
330-
CeedRunKernel_Cuda(ceed, is_transpose ? data->InterpTransposeAtPoints : data->InterpAtPoints, num_elem, block_size, interp_args));
327+
CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
328+
CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
329+
CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);
330+
331+
CeedCallBackend(CeedInit_CudaInterp(data->d_chebyshev_interp_1d, P_1d, Q_1d, &data->c_B));
332+
void *interp_args[] = {(void *)&num_elem, &data->c_B, &data->d_points_per_elem, &d_x, &d_u, &d_v};
333+
334+
if (dim == 1) {
335+
CeedInt elems_per_block = CeedIntMin(ceed_Cuda->device_prop.maxThreadsDim[2], CeedIntMax(512 / thread_1d,
336+
1)); // avoid >512 total threads
337+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
338+
CeedInt shared_mem = elems_per_block * thread_1d * sizeof(CeedScalar);
339+
340+
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, is_transpose ? data->InterpTransposeAtPoints : data->InterpAtPoints, grid, thread_1d, 1,
341+
elems_per_block, shared_mem, interp_args));
342+
} else if (dim == 2) {
343+
const CeedInt opt_elems[7] = {0, 32, 8, 6, 4, 2, 8};
344+
// elems_per_block must be at least 1
345+
CeedInt elems_per_block = CeedIntMax(thread_1d < 7 ? opt_elems[thread_1d] / num_comp : 1, 1);
346+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
347+
CeedInt shared_mem = elems_per_block * thread_1d * thread_1d * sizeof(CeedScalar);
348+
349+
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, is_transpose ? data->InterpTransposeAtPoints : data->InterpAtPoints, grid, thread_1d,
350+
thread_1d, elems_per_block, shared_mem, interp_args));
351+
} else if (dim == 3) {
352+
CeedInt elems_per_block = 1;
353+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
354+
CeedInt shared_mem = elems_per_block * thread_1d * thread_1d * sizeof(CeedScalar);
355+
356+
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, is_transpose ? data->InterpTransposeAtPoints : data->InterpAtPoints, grid, thread_1d,
357+
thread_1d, elems_per_block, shared_mem, interp_args));
358+
}
331359
} break;
332360
case CEED_EVAL_GRAD: {
333-
void *grad_args[] = {(void *)&num_elem, &data->d_chebyshev_interp_1d, &data->d_points_per_elem, &d_x, &d_u, &d_v};
334-
const CeedInt block_size = CeedIntMin(CeedIntPow(Q_1d, dim), max_block_size);
361+
CeedInt P_1d, Q_1d;
362+
363+
CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
364+
CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
365+
CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);
366+
367+
CeedCallBackend(CeedInit_CudaInterp(data->d_chebyshev_interp_1d, P_1d, Q_1d, &data->c_B));
368+
void *grad_args[] = {(void *)&num_elem, &data->d_chebyshev_interp_1d, &data->d_points_per_elem, &d_x, &d_u, &d_v};
369+
370+
if (dim == 1) {
371+
CeedInt elems_per_block = CeedIntMin(ceed_Cuda->device_prop.maxThreadsDim[2], CeedIntMax(512 / thread_1d,
372+
1)); // avoid >512 total threads
373+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
374+
CeedInt shared_mem = elems_per_block * thread_1d * sizeof(CeedScalar);
375+
376+
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, is_transpose ? data->GradTransposeAtPoints : data->GradAtPoints, grid, thread_1d, 1,
377+
elems_per_block, shared_mem, grad_args));
378+
} else if (dim == 2) {
379+
const CeedInt opt_elems[7] = {0, 32, 8, 6, 4, 2, 8};
380+
// elems_per_block must be at least 1
381+
CeedInt elems_per_block = CeedIntMax(thread_1d < 7 ? opt_elems[thread_1d] / num_comp : 1, 1);
382+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
383+
CeedInt shared_mem = elems_per_block * thread_1d * thread_1d * sizeof(CeedScalar);
384+
385+
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, is_transpose ? data->GradTransposeAtPoints : data->GradAtPoints, grid, thread_1d, thread_1d,
386+
elems_per_block, shared_mem, grad_args));
387+
} else if (dim == 3) {
388+
CeedInt elems_per_block = 1;
389+
CeedInt grid = num_elem / elems_per_block + (num_elem % elems_per_block > 0);
390+
CeedInt shared_mem = elems_per_block * thread_1d * thread_1d * sizeof(CeedScalar);
335391

336-
CeedCallBackend(CeedRunKernel_Cuda(ceed, is_transpose ? data->GradTransposeAtPoints : data->GradAtPoints, num_elem, block_size, grad_args));
392+
CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, is_transpose ? data->GradTransposeAtPoints : data->GradAtPoints, grid, thread_1d, thread_1d,
393+
elems_per_block, shared_mem, grad_args));
394+
}
337395
} break;
338396
case CEED_EVAL_WEIGHT:
339397
case CEED_EVAL_NONE: /* handled separately below */

0 commit comments

Comments (0)