
Commit a979ca2

ggml: adds CONV_2D op and direct GEMM Vulkan implementation (ggml-org#14316)
* ggml/ggml-vulkan/test-backend-ops: adds CONV_2D for Vulkan
* ggml-vulkan: adds f32 scalar shader to compute 2D convolution directly with GEMM (no need for im2col)
* test-backend-ops: adds test_case_ref to check the validity/performance of ops against reference implementations having different graphs; adds tests
* Performance fixes: minimized branch divergence, uses collectives to eliminate redundant calculation, macros removed
* Kernel shared memory size check
* Updates test-backend-ops to support graphs for performance measurement
* Apple/Win32 compile errors fixed
* Subgroup size used to determine tile size -> fixes llvmpipe errors
* Collectives disabled by default
* Intel support is disabled as the performance is poor
* Conv2d enabled for Intel with disabled collectives, disabled for Apple
* test-backend-ops modifications are reverted
* Trailing spaces and missing override fixed
* Triggering pipeline relaunch
* Code formatted with .clang-format
1 parent 9008328 commit a979ca2
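The core idea of the direct GEMM shader: the convolution is computed as a single matrix multiply without materializing the im2col matrix. The kernel tensor acts as an M x K matrix with M = Cout and K = Cin*KH*KW (the "CRS" dimension), and each of the N*OH*OW ("NPQ") output pixels is one column of the K x NPQ operand, gathered on the fly from the input. A minimal CPU sketch of that indexing, hypothetical and for illustration only (contiguous f32 tensors, matching the constraints the op is registered with below):

    #include <cstdint>

    // Implicit-GEMM reference: dst[M=Cout][NPQ] = knl[M][K=CRS] * im2col(src)[K][NPQ],
    // where the CRS x NPQ im2col matrix is never stored: each element is gathered
    // from src on the fly, analogous to what the Vulkan shader does per tile.
    static void conv2d_direct_gemm_ref(const float * knl, const float * src, float * dst,
                                       int64_t Cout, int64_t Cin, int64_t N,
                                       int64_t KW, int64_t KH, int64_t W, int64_t H,
                                       int64_t OW, int64_t OH,
                                       int s0, int s1, int p0, int p1, int d0, int d1) {
        const int64_t CRS = Cin * KH * KW;  // GEMM inner dimension
        const int64_t NPQ = N * OH * OW;    // GEMM column count
        for (int64_t m = 0; m < Cout; ++m) {
            for (int64_t col = 0; col < NPQ; ++col) {
                const int64_t n  = col / (OH * OW);
                const int64_t oh = (col / OW) % OH;
                const int64_t ow = col % OW;
                float acc = 0.0f;
                for (int64_t k = 0; k < CRS; ++k) {  // one im2col column, gathered element-wise
                    const int64_t c  = k / (KH * KW);
                    const int64_t kh = (k / KW) % KH;
                    const int64_t kw = k % KW;
                    const int64_t iw = ow * s0 + kw * d0 - p0;  // padding handled as zeros
                    const int64_t ih = oh * s1 + kh * d1 - p1;
                    if (iw >= 0 && iw < W && ih >= 0 && ih < H) {
                        acc += knl[((m * Cin + c) * KH + kh) * KW + kw] *  // [KW, KH, Cin, Cout]
                               src[((n * Cin + c) * H + ih) * W + iw];     // [W, H, Cin, N]
                    }
                }
                dst[((n * Cout + m) * OH + oh) * OW + ow] = acc;           // [OW, OH, Cout, N]
            }
        }
    }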

4 files changed: +711 −11 lines changed

ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 246 additions & 11 deletions
@@ -483,6 +483,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_rwkv_wkv6_f32;
     vk_pipeline pipeline_rwkv_wkv7_f32;
     vk_pipeline pipeline_opt_step_adamw_f32;
+    vk_pipeline pipeline_conv2d_f32;
     vk_pipeline pipeline_conv2d_dw_whcn_f32;
     vk_pipeline pipeline_conv2d_dw_cwhn_f32;
 
@@ -876,6 +877,38 @@ struct vk_op_rwkv_wkv7_push_constants {
     uint32_t H;
 };
 
+struct vk_op_conv2d_push_constants {
+    uint32_t Cout;
+    uint32_t Cin;
+    uint32_t N;
+
+    uint32_t KW;
+    uint32_t KH;
+    uint32_t W;
+    uint32_t H;
+    uint32_t OW;
+    uint32_t OH;
+
+    uint32_t s0;
+    uint32_t s1;
+    uint32_t p0;
+    uint32_t p1;
+    uint32_t d0;
+    uint32_t d1;
+
+    uint32_t nb01;
+    uint32_t nb02;
+    uint32_t nb03;
+
+    uint32_t nb11;
+    uint32_t nb12;
+    uint32_t nb13;
+
+    uint32_t nb1;
+    uint32_t nb2;
+    uint32_t nb3;
+};
+
 struct vk_op_conv2d_dw_push_constants {
     uint32_t ne;
     uint32_t batches;
@@ -975,18 +1008,45 @@ class vk_memory_logger {
 #endif // GGML_VULKAN_MEMORY_DEBUG
 
 class vk_perf_logger {
-public:
+  public:
     void print_timings() {
+        if (timings.empty()) {
+            return;
+        }
+        uint64_t total_all_op_times = 0;
         std::cerr << "----------------\nVulkan Timings:" << std::endl;
-        for (const auto& t : timings) {
-            uint64_t total = 0;
-            for (const auto& time : t.second) {
-                total += time;
+        for (const auto & t : timings) {
+            uint64_t total_op_times = 0;
+            for (const auto & time : t.second) {
+                total_op_times += time;
+            }
+            std::cerr << t.first << ": " << t.second.size() << " x " << (total_op_times / t.second.size() / 1000.0)
+                      << " us";
+
+            // If we have as many flops entries as timing entries for the op, then compute and log the flops/S.
+            auto it = flops.find(t.first);
+            if (it != flops.end() && (it->second).size() == t.second.size()) {
+                uint64_t total_op_flops = 0;
+                for (const auto & elem : it->second) {
+                    total_op_flops += elem;
+                }
+                std::cerr << " ("
+                          << (double(total_op_flops) / (1000.0 * 1000.0 * 1000.0)) /
+                                 (double(total_op_times) / (1000.0 * 1000.0 * 1000.0))
+                          << " GFLOPS/s)";
             }
-            std::cerr << t.first << ": " << t.second.size() << " x " << (total / t.second.size() / 1000.0) << " us" << std::endl;
+
+            total_all_op_times += total_op_times;
+
+            std::cerr << std::endl;
+        }
+
+        if (timings.size() > 0) {
+            std::cerr << "Total time: " << total_all_op_times / 1000.0 << " us." << std::endl;
         }
 
         timings.clear();
+        flops.clear();
     }
 
     void log_timing(const ggml_tensor * node, uint64_t time) {
@@ -995,22 +1055,45 @@ class vk_perf_logger {
             return;
         }
         if (node->op == GGML_OP_MUL_MAT || node->op == GGML_OP_MUL_MAT_ID) {
-            const uint64_t m = node->src[0]->ne[1];
-            const uint64_t n = node->src[1]->ne[1];
-            const uint64_t k = node->src[1]->ne[0];
-            std::string name = ggml_op_name(node->op);
+            const uint64_t m    = node->src[0]->ne[1];
+            const uint64_t n    = node->src[1]->ne[1];
+            const uint64_t k    = node->src[1]->ne[0];
+            std::string    name = ggml_op_name(node->op);
             if (n == 1) {
                 name += "_VEC m=" + std::to_string(m) + " k=" + std::to_string(k);
             } else {
                 name += " m=" + std::to_string(m) + " n=" + std::to_string(n) + " k=" + std::to_string(k);
             }
             timings[name].push_back(time);
+            flops[name].push_back(m * n * (k + (k - 1)));
+            return;
+        }
+        if (node->op == GGML_OP_CONV_2D) {
+            std::string name = ggml_op_name(node->op);
+            ggml_tensor * knl = node->src[0];
+            uint64_t OW = node->ne[0];
+            uint64_t OH = node->ne[1];
+            uint64_t N = node->ne[3];
+            uint64_t Cout = node->ne[2];
+            uint64_t KW = knl->ne[0];
+            uint64_t KH = knl->ne[1];
+            uint64_t Cin = knl->ne[2];
+            // KxCRS @ CRSxNPQ = KxNPQ -> M=K, K=CRS, N=NPQ
+            uint64_t size_M = Cout;
+            uint64_t size_K = Cin * KW * KH;
+            uint64_t size_N = N * OW * OH;
+            uint64_t n_flops = size_M * size_N * (size_K + (size_K - 1));
+            name += " M=Cout=" + std::to_string(size_M) + ", K=Cin*KW*KH=" + std::to_string(size_K) +
+                    ", N=N*OW*OH=" + std::to_string(size_N);
+            flops[name].push_back(n_flops);
+            timings[name].push_back(time);
             return;
         }
         timings[ggml_op_name(node->op)].push_back(time);
     }
-private:
+  private:
     std::map<std::string, std::vector<uint64_t>> timings;
+    std::map<std::string, std::vector<uint64_t>> flops;
 };
 
 struct ggml_backend_vk_context {
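The logger counts K multiplies and K−1 additions per output element, i.e. M*N*(2K−1) flops per GEMM-shaped op; dividing by the nanosecond timing then yields GFLOPS/s. A standalone sanity check of that arithmetic, with hypothetical sizes:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Example: 3x3 conv, Cin=64, Cout=128, 64x64 output, batch 1,
        // in the logger's GEMM terms M=Cout, K=Cin*KW*KH, N=N*OW*OH.
        const uint64_t size_M = 128;
        const uint64_t size_K = 64 * 3 * 3;
        const uint64_t size_N = 1 * 64 * 64;
        const uint64_t n_flops = size_M * size_N * (size_K + (size_K - 1));  // 2K-1 flops per output
        const uint64_t time_ns = 500000;                                     // hypothetical 0.5 ms timing
        const double gflops_per_s = (double(n_flops) / 1e9) / (double(time_ns) / 1e9);
        std::printf("%llu flops -> %.1f GFLOPS/s\n", (unsigned long long) n_flops, gflops_per_s);
    }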
@@ -2113,6 +2196,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
         }
         compile_count++;
     }
+
     compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), spv_size, spv_data, entrypoint,
                                   parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size));
 };
@@ -2962,6 +3046,42 @@ static void ggml_vk_load_shaders(vk_device& device) {
 
     ggml_vk_create_pipeline(device, device->pipeline_opt_step_adamw_f32, "opt_step_adamw_f32", opt_step_adamw_f32_len, opt_step_adamw_f32_data, "main", 5, sizeof(vk_op_push_constants), {512, 1, 1}, {}, 1);
 
+    // conv2d
+    uint32_t conv2d_WG_SIZE = 256;
+    uint32_t conv2d_BS_K = 128;
+    uint32_t conv2d_BS_CRS = 16;
+    uint32_t use_collectives = 0;  // Enables subgroup ops for preventing the re-calculation of indices.
+    if (device->subgroup_shuffle &&
+        device->vendor_id != VK_VENDOR_ID_INTEL) {  // Do not enable collectives on Intel, see PR 14316.
+        use_collectives = 1;
+        conv2d_BS_CRS = std::min(
+            device->subgroup_size,
+            conv2d_BS_CRS);  // CRS block size should be capped at subgroup size for correctness when shuffle is used.
+    }
+    uint32_t conv2d_BS_NPQ = 128;
+    uint32_t conv2d_TS_K = 8;
+    uint32_t conv2d_shmem_req =
+        (conv2d_BS_K * (conv2d_BS_CRS + 1) + conv2d_BS_CRS * (conv2d_BS_NPQ + 1)) * sizeof(float);
+    if (device->properties.limits.maxComputeSharedMemorySize < conv2d_shmem_req) {
+        conv2d_BS_CRS = 8;
+        if (use_collectives) {
+            conv2d_BS_CRS = std::min(device->subgroup_size, conv2d_BS_CRS);
+        }
+    }
+
+    if (use_collectives) {
+        ggml_vk_create_pipeline(
+            device, device->pipeline_conv2d_f32, "conv2d_f32", conv2d_f32_len, conv2d_f32_data, "main", 3,
+            sizeof(vk_op_conv2d_push_constants), { conv2d_BS_K, conv2d_BS_NPQ, 1 },
+            { conv2d_WG_SIZE, conv2d_BS_K, conv2d_BS_CRS, conv2d_BS_NPQ, conv2d_TS_K, use_collectives }, 1, true, true);
+    } else {
+        ggml_vk_create_pipeline(
+            device, device->pipeline_conv2d_f32, "conv2d_f32", conv2d_f32_len, conv2d_f32_data, "main", 3,
+            sizeof(vk_op_conv2d_push_constants), { conv2d_BS_K, conv2d_BS_NPQ, 1 },
+            { conv2d_WG_SIZE, conv2d_BS_K, conv2d_BS_CRS, conv2d_BS_NPQ, conv2d_TS_K, use_collectives }, 1, true,
+            false);
+    }
+
     ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_whcn_f32, "conv2d_dw_whcn_f32", conv2d_dw_whcn_f32_len, conv2d_dw_whcn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_conv2d_dw_cwhn_f32, "conv2d_dw_cwhn_f32", conv2d_dw_cwhn_f32_len, conv2d_dw_cwhn_f32_data, "main", 3, sizeof(vk_op_conv2d_dw_push_constants), {512, 1, 1}, {}, 1);
 
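The shared-memory request above is two padded tiles: BS_K x (BS_CRS+1) floats for the kernel block and BS_CRS x (BS_NPQ+1) floats for the input block, where the +1 column is the usual padding to avoid shared-memory bank conflicts. A quick check of the default numbers (assumed values; as above, BS_CRS can drop to 8 or be capped at the subgroup size):

    #include <cstddef>
    #include <cstdio>

    int main() {
        // Defaults chosen in ggml_vk_load_shaders (assumed here).
        const unsigned BS_K = 128, BS_CRS = 16, BS_NPQ = 128;
        const std::size_t shmem_req = (BS_K * (BS_CRS + 1) + BS_CRS * (BS_NPQ + 1)) * sizeof(float);
        std::printf("conv2d shmem request: %zu bytes\n", shmem_req);  // 4240 floats = 16960 bytes
        // This fits the common 32 KiB maxComputeSharedMemorySize; devices below
        // the request trigger the BS_CRS = 8 fallback above.
    }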
@@ -6837,6 +6957,12 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_leaky_relu_f32;
         }
         return nullptr;
+    case GGML_OP_CONV_2D:
+        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 &&
+            ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) {
+            return ctx->device->pipeline_conv2d_f32;
+        }
+        return nullptr;
     case GGML_OP_CONV_2D_DW:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
             if (ggml_is_contiguous(src1)) {
@@ -7159,6 +7285,31 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
             const uint32_t OW = dst->ne[0];
             elements = { N * OC * OH * OW, 1, 1};
         } break;
+    case GGML_OP_CONV_2D:
+        {
+            // src0 - kernel: [KW, KH, Cin, Cout]
+            // src1 - input:  [W, H, Cin, N]
+            // dst - result:  [OW, OH, Cout, N]
+
+            // Copied from ggml.c: int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d)
+            auto calc_conv_output_size = [](int64_t ins, int64_t ks, int s, int p, int d) -> int64_t {
+                return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
+            };
+            // parallelize in {OW/BS_K, OH/BS_NPQ, 1}
+            int64_t W = src1->ne[0];
+            int64_t H = src1->ne[1];
+            int64_t KW = src0->ne[0];
+            int64_t KH = src0->ne[1];
+            int64_t Cout = src0->ne[3];
+            int64_t N = src1->ne[3];
+            int64_t OH = calc_conv_output_size(H, KH, dst->op_params[1], dst->op_params[3], dst->op_params[5]);
+            int64_t OW = calc_conv_output_size(W, KW, dst->op_params[0], dst->op_params[2], dst->op_params[4]);
+            int64_t NPQ = N * OW * OH;
+
+            // Tile output matrix to (K/NB_K, NPQ/NB_NPQ, 1) workgroups
+            elements = { static_cast<uint32_t>(Cout), static_cast<uint32_t>(NPQ), 1 };
+        }
+        break;
     case GGML_OP_ADD:
     case GGML_OP_SUB:
     case GGML_OP_DIV:
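calc_conv_output_size is the standard dilated-convolution output formula O = (I + 2p − d*(k − 1) − 1)/s + 1; for example, a 3x3 kernel with stride 1, padding 1, dilation 1 on a 64-wide input gives (64 + 2 − 2 − 1)/1 + 1 = 64. A few spot checks:

    #include <cassert>
    #include <cstdint>

    static int64_t calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
        return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
    }

    int main() {
        assert(calc_conv_output_size(64, 3, 1, 1, 1) == 64);  // "same" 3x3 conv
        assert(calc_conv_output_size(64, 3, 2, 1, 1) == 32);  // strided downsample
        assert(calc_conv_output_size(64, 3, 1, 0, 2) == 60);  // dilation widens the effective kernel
    }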
@@ -8025,6 +8176,55 @@ static void ggml_vk_pool_2d(ggml_backend_vk_context * ctx, vk_context& subctx, c
     }, dryrun);
 }
 
+static void ggml_vk_conv_2d(ggml_backend_vk_context * ctx, vk_context & subctx, const ggml_tensor * src0,
+                            const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+    GGML_TENSOR_BINARY_OP_LOCALS
+
+    GGML_ASSERT(nb00 == sizeof(float));
+    GGML_ASSERT(nb10 == sizeof(float));
+    GGML_ASSERT(nb0 == sizeof(float));
+
+    vk_op_conv2d_push_constants p{};
+    p.Cout = static_cast<uint32_t>(ne03);
+    p.Cin = static_cast<uint32_t>(ne02);
+    p.N = static_cast<uint32_t>(ne13);
+
+    p.KW = static_cast<uint32_t>(ne00);
+    p.KH = static_cast<uint32_t>(ne01);
+    p.W = static_cast<uint32_t>(ne10);
+    p.H = static_cast<uint32_t>(ne11);
+    p.OW = static_cast<uint32_t>(ne0);
+    p.OH = static_cast<uint32_t>(ne1);
+
+    p.s0 = static_cast<uint32_t>(dst->op_params[0]);
+    p.s1 = static_cast<uint32_t>(dst->op_params[1]);
+    p.p0 = static_cast<uint32_t>(dst->op_params[2]);
+    p.p1 = static_cast<uint32_t>(dst->op_params[3]);
+    p.d0 = static_cast<uint32_t>(dst->op_params[4]);
+    p.d1 = static_cast<uint32_t>(dst->op_params[5]);
+
+    p.nb01 = static_cast<uint32_t>(nb01 / nb00);
+    p.nb02 = static_cast<uint32_t>(nb02 / nb00);
+    p.nb03 = static_cast<uint32_t>(nb03 / nb00);
+
+    p.nb11 = static_cast<uint32_t>(nb11 / nb10);
+    p.nb12 = static_cast<uint32_t>(nb12 / nb10);
+    p.nb13 = static_cast<uint32_t>(nb13 / nb10);
+
+    p.nb1 = static_cast<uint32_t>(nb1 / nb0);
+    p.nb2 = static_cast<uint32_t>(nb2 / nb0);
+    p.nb3 = static_cast<uint32_t>(nb3 / nb0);
+
+    GGML_ASSERT(ne03 == ne2);
+    GGML_ASSERT(ne02 == ne12);
+
+    ggml_vk_op_f32(ctx, subctx, src0, src1, nullptr, dst, GGML_OP_CONV_2D, std::move(p), dryrun);
+}
+
 static void ggml_vk_conv_2d_dw(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
     vk_op_conv2d_dw_push_constants p{};
     p.ne = ggml_nelements(dst);
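Note that the push constants carry element strides rather than ggml's byte strides: since nb00, nb10 and nb0 are asserted to be sizeof(float), dividing each nbXY by the element size is exact. A tiny illustration with a hypothetical contiguous kernel of shape [KW=3, KH=3, Cin=64, Cout=128]:

    #include <cassert>
    #include <cstdint>

    int main() {
        // Byte strides of the contiguous f32 kernel (assumed example values):
        const uint64_t nb00 = 4, nb01 = 3 * 4, nb02 = 3 * 3 * 4, nb03 = 3 * 3 * 64 * 4;
        // The shader indexes in elements, so the host divides by the element size:
        assert(nb01 / nb00 == 3);    // elements per row
        assert(nb02 / nb00 == 9);    // elements per channel plane
        assert(nb03 / nb00 == 576);  // elements per output-channel filter
    }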
@@ -9087,6 +9287,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_TIMESTEP_EMBEDDING:
     case GGML_OP_CONV_TRANSPOSE_1D:
     case GGML_OP_POOL_2D:
+    case GGML_OP_CONV_2D:
     case GGML_OP_CONV_2D_DW:
     case GGML_OP_RWKV_WKV6:
     case GGML_OP_RWKV_WKV7:
@@ -9154,6 +9355,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_TIMESTEP_EMBEDDING:
     case GGML_OP_CONV_TRANSPOSE_1D:
     case GGML_OP_POOL_2D:
+    case GGML_OP_CONV_2D:
     case GGML_OP_CONV_2D_DW:
     case GGML_OP_LEAKY_RELU:
         {
@@ -9360,6 +9562,10 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
     case GGML_OP_POOL_2D:
         ggml_vk_pool_2d(ctx, compute_ctx, src0, node, dryrun);
 
+        break;
+    case GGML_OP_CONV_2D:
+        ggml_vk_conv_2d(ctx, compute_ctx, src0, src1, node, dryrun);
+
         break;
     case GGML_OP_CONV_2D_DW:
         ggml_vk_conv_2d_dw(ctx, compute_ctx, src0, src1, node, dryrun);
@@ -9490,6 +9696,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
     case GGML_OP_TIMESTEP_EMBEDDING:
     case GGML_OP_CONV_TRANSPOSE_1D:
     case GGML_OP_POOL_2D:
+    case GGML_OP_CONV_2D:
     case GGML_OP_CONV_2D_DW:
     case GGML_OP_RWKV_WKV6:
     case GGML_OP_RWKV_WKV7:
@@ -10071,6 +10278,12 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg
         ggml_vk_build_graph(ctx, cgraph, i, nullptr, 0, true, false, false, false);
         if (cgraph->nodes[i]->op == GGML_OP_MUL_MAT || cgraph->nodes[i]->op == GGML_OP_MUL_MAT_ID) {
             total_mat_mul_bytes += ggml_nbytes(cgraph->nodes[i]->src[0]);
+        } else if (cgraph->nodes[i]->op == GGML_OP_CONV_2D) {
+            // Return CRSxNPQxsizeof(*) to account as many bytes as mul_mat has in im2col->mul_mat mode.
+            auto CRS_size =
+                cgraph->nodes[i]->src[0]->ne[0] * cgraph->nodes[i]->src[0]->ne[1] * cgraph->nodes[i]->src[0]->ne[2];
+            auto NPQ_size = cgraph->nodes[i]->ne[0] * cgraph->nodes[i]->ne[1] * cgraph->nodes[i]->ne[3];
+            total_mat_mul_bytes += NPQ_size * CRS_size * ggml_type_size(cgraph->nodes[i]->type);
         }
         i += ctx->num_additional_fused_ops;
         ctx->num_additional_fused_ops = 0;
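For the perf-run byte accounting, a CONV_2D node is charged as if it were the mul_mat of an im2col pipeline, i.e. the CRS x NPQ virtual matrix it avoids materializing. The same arithmetic for one hypothetical node:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Hypothetical node: 3x3 kernel, Cin=64 -> CRS; batch 1, 64x64 output -> NPQ (f32).
        const int64_t CRS = 3 * 3 * 64;
        const int64_t NPQ = 1 * 64 * 64;
        const int64_t bytes = CRS * NPQ * (int64_t) sizeof(float);
        std::printf("im2col-equivalent: %lld bytes\n", (long long) bytes);  // 576 * 4096 * 4 = 9437184
    }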
@@ -10647,6 +10860,20 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
             return true;
         case GGML_OP_CONV_TRANSPOSE_1D:
             return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32;
+        case GGML_OP_CONV_2D:
+            {
+                // Op is disabled for Apple because it segfaults at pipeline create time on MoltenVK
+                ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context;
+                const vk_device& device = ggml_vk_get_device(ctx->device);
+                bool is_Apple = ggml_vk_get_device(ctx->device)->vendor_id == VK_VENDOR_ID_APPLE;
+                // Channel-contiguous format is not supported yet.
+                return (op->src[0]->type == GGML_TYPE_F32 &&
+                        op->src[1]->type == GGML_TYPE_F32 &&
+                        op->type == GGML_TYPE_F32 &&
+                        ggml_is_contiguous(op->src[0]) &&
+                        ggml_is_contiguous(op->src[1]) &&
+                        ggml_is_contiguous(op)) && !is_Apple;
+            }
         default:
             return false;
     }
@@ -11205,6 +11432,14 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
         const int32_t p1 = tensor->op_params[6];
 
         tensor_clone = ggml_pool_2d(ggml_ctx, src_clone[0], op, k0, k1, s0, s1, p0, p1);
+    } else if (tensor->op == GGML_OP_CONV_2D) {
+        const int32_t s0 = tensor->op_params[0];
+        const int32_t s1 = tensor->op_params[1];
+        const int32_t p0 = tensor->op_params[2];
+        const int32_t p1 = tensor->op_params[3];
+        const int32_t d0 = tensor->op_params[4];
+        const int32_t d1 = tensor->op_params[5];
+        tensor_clone = ggml_conv_2d(ggml_ctx, src_clone[0], src_clone[1], s0, s1, p0, p1, d0, d1);
     } else if (tensor->op == GGML_OP_LEAKY_RELU) {
         const float * op_params = (const float *)tensor->op_params;
         tensor_clone = ggml_leaky_relu(ggml_ctx, src_clone[0], op_params[0], false);
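The GGML_VK_CHECK_RESULTS path above rebuilds the node with the public ggml_conv_2d API, which lowers to im2col + mul_mat, so the direct shader is validated against a reference that uses a different graph. A sketch of constructing such a reference node (hypothetical shapes; assumes ggml.h and linking against ggml):

    #include "ggml.h"

    int main() {
        // Scratch context; no_alloc = false so tensor data is allocated in-context.
        ggml_init_params params = { /*mem_size*/ 256 * 1024 * 1024, /*mem_buffer*/ nullptr, /*no_alloc*/ false };
        ggml_context * ctx = ggml_init(params);

        ggml_tensor * knl = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 3, 3, 64, 128);  // [KW, KH, Cin, Cout]
        ggml_tensor * inp = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 64, 64, 64, 1);  // [W, H, Cin, N]
        // Argument order matches the op_params read back above: s0, s1, p0, p1, d0, d1.
        ggml_tensor * out = ggml_conv_2d(ctx, knl, inp, 1, 1, 1, 1, 1, 1);          // [OW, OH, Cout, N]
        GGML_ASSERT(out->ne[0] == 64 && out->ne[1] == 64 && out->ne[2] == 128 && out->ne[3] == 1);

        ggml_free(ctx);
    }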
