Commit 66b161a

Merge pull request #960 from vinser52/svinogra_ipc_api
Update umfOpenIPCHandle API to use IPC handler instead of pool
2 parents 52680b5 + 55cf4ab commit 66b161a

File tree

10 files changed: +130 -30 lines changed
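
At a glance, the change for callers is one extra step: instead of passing a pool handle to umfOpenIPCHandle, the consumer first asks the pool for its IPC handler. The following is a minimal sketch of the new calling pattern, assuming an already-created pool and a received IPC handle; the helper name open_shared_buffer and its arguments are illustrative and not part of this commit.

    #include <umf/ipc.h>
    #include <umf/memory_pool.h>

    static umf_result_t open_shared_buffer(umf_memory_pool_handle_t pool,
                                           umf_ipc_handle_t ipc_handle,
                                           void **ptr) {
        // New step introduced by this commit: obtain the IPC handler from the pool.
        umf_ipc_handler_handle_t ipc_handler = NULL;
        umf_result_t umf_result = umfPoolGetIPCHandler(pool, &ipc_handler);
        if (umf_result != UMF_RESULT_SUCCESS) {
            return umf_result;
        }
        // umfOpenIPCHandle now takes the IPC handler instead of the pool handle.
        return umfOpenIPCHandle(ipc_handler, ipc_handle, ptr);
    }

Call sites that previously passed the pool handle as the first argument of umfOpenIPCHandle only need to swap in the handler obtained this way, as the per-file diffs below show.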

examples/ipc_ipcapi/ipc_ipcapi_consumer.c (+8 -1)

@@ -142,6 +142,13 @@ int main(int argc, char *argv[]) {
         goto err_destroy_OS_memory_provider;
     }
 
+    umf_ipc_handler_handle_t ipc_handler;
+    umf_result = umfPoolGetIPCHandler(scalable_pool, &ipc_handler);
+    if (umf_result != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "[consumer] ERROR: get IPC handler failed\n");
+        goto err_destroy_scalable_pool;
+    }
+
     // connect to the producer
     producer_socket = consumer_connect_to_producer(port);
     if (producer_socket < 0) {
@@ -209,7 +216,7 @@ int main(int argc, char *argv[]) {
             len);
 
     void *SHM_ptr;
-    umf_result = umfOpenIPCHandle(scalable_pool, IPC_handle, &SHM_ptr);
+    umf_result = umfOpenIPCHandle(ipc_handler, IPC_handle, &SHM_ptr);
     if (umf_result == UMF_RESULT_ERROR_NOT_SUPPORTED) {
         fprintf(stderr,
                 "[consumer] SKIP: opening the IPC handle is not supported\n");

examples/ipc_level_zero/ipc_level_zero.c (+9 -2)

@@ -180,14 +180,21 @@ int main(void) {
 
     fprintf(stdout, "Consumer pool created.\n");
 
+    umf_ipc_handler_handle_t ipc_handler = 0;
+    umf_result = umfPoolGetIPCHandler(consumer_pool, &ipc_handler);
+    if (umf_result != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "ERROR: Failed to get IPC handler!\n");
+        return -1;
+    }
+
     void *mapped_buf = NULL;
-    umf_result = umfOpenIPCHandle(consumer_pool, ipc_handle, &mapped_buf);
+    umf_result = umfOpenIPCHandle(ipc_handler, ipc_handle, &mapped_buf);
     if (umf_result != UMF_RESULT_SUCCESS) {
         fprintf(stderr, "ERROR: Failed to open IPC handle!\n");
         return -1;
     }
 
-    fprintf(stdout, "IPC handle opened in the consumer pool.\n");
+    fprintf(stdout, "IPC handle opened.\n");
 
     size_t *tmp_buf = malloc(BUFFER_SIZE);
     ret = level_zero_copy(consumer_context, device, tmp_buf, mapped_buf,

include/umf/ipc.h (+11 -2)

@@ -19,6 +19,8 @@ extern "C" {
 
 typedef struct umf_ipc_data_t *umf_ipc_handle_t;
 
+typedef void *umf_ipc_handler_handle_t;
+
 ///
 /// @brief Returns the size of IPC handles for the specified pool.
 /// @param hPool [in] Pool handle
@@ -44,11 +46,11 @@ umf_result_t umfPutIPCHandle(umf_ipc_handle_t ipcHandle);
 
 ///
 /// @brief Open IPC handle retrieved by umfGetIPCHandle.
-/// @param hPool [in] Pool handle where to open the the IPC handle.
+/// @param hIPCHandler [in] IPC Handler handle used to open the IPC handle.
 /// @param ipcHandle [in] IPC handle.
 /// @param ptr [out] pointer to the memory in the current process.
 /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
-umf_result_t umfOpenIPCHandle(umf_memory_pool_handle_t hPool,
+umf_result_t umfOpenIPCHandle(umf_ipc_handler_handle_t hIPCHandler,
                               umf_ipc_handle_t ipcHandle, void **ptr);
 
 ///
@@ -57,6 +59,13 @@ umf_result_t umfOpenIPCHandle(umf_memory_pool_handle_t hPool,
 /// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
 umf_result_t umfCloseIPCHandle(void *ptr);
 
+/// @brief Get handle to the IPC handler from existing pool.
+/// @param hPool [in] Pool handle
+/// @param hIPCHandler [out] handle to the IPC handler
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
+umf_result_t umfPoolGetIPCHandler(umf_memory_pool_handle_t hPool,
+                                  umf_ipc_handler_handle_t *hIPCHandler);
+
 #ifdef __cplusplus
 }
 #endif

scripts/docs_config/examples.rst (+6 -3)

@@ -194,12 +194,15 @@ to another process it can be opened by the :any:`umfOpenIPCHandle` function.
 
 .. code-block:: c
 
+    umf_ipc_handler_handle_t ipc_handler = 0;
+    umf_result = umfPoolGetIPCHandler(consumer_pool, &ipc_handler);
+
     void *mapped_buf = NULL;
-    umf_result = umfOpenIPCHandle(consumer_pool, ipc_handle, &mapped_buf);
+    umf_result = umfOpenIPCHandle(ipc_handler, ipc_handle, &mapped_buf);
 
-The :any:`umfOpenIPCHandle` function requires the memory pool handle and the IPC handle as input parameters. It maps
+The :any:`umfOpenIPCHandle` function requires the IPC handler and the IPC handle as input parameters. The IPC handler maps
 the handle to the current process address space and returns the pointer to the same memory region that was allocated
-in the producer process.
+in the producer process. To retrieve the IPC handler, the :any:`umfPoolGetIPCHandler` function is used.
 
 .. note::
     The virtual addresses of the memory region referred to by the IPC handle may not be the same in the producer and consumer processes.
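
Building on the documented snippet, a consumer-side sequence under the new API, including the matching cleanup call declared in include/umf/ipc.h, might look roughly like this. It is a hedged sketch that reuses the variable names from the documentation above, with error handling shortened to early returns.

    umf_ipc_handler_handle_t ipc_handler = 0;
    umf_result = umfPoolGetIPCHandler(consumer_pool, &ipc_handler);
    if (umf_result != UMF_RESULT_SUCCESS) {
        return -1;
    }

    void *mapped_buf = NULL;
    umf_result = umfOpenIPCHandle(ipc_handler, ipc_handle, &mapped_buf);
    if (umf_result != UMF_RESULT_SUCCESS) {
        return -1;
    }

    // ... read from or write to mapped_buf ...

    // Unmap the region from this process when it is no longer needed.
    umf_result = umfCloseIPCHandle(mapped_buf);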

src/ipc.c (+33 -4)

@@ -119,12 +119,18 @@ umf_result_t umfPutIPCHandle(umf_ipc_handle_t umfIPCHandle) {
     return ret;
 }
 
-umf_result_t umfOpenIPCHandle(umf_memory_pool_handle_t hPool,
+umf_result_t umfOpenIPCHandle(umf_ipc_handler_handle_t hIPCHandler,
                               umf_ipc_handle_t umfIPCHandle, void **ptr) {
 
-    // We cannot use umfPoolGetMemoryProvider function because it returns
-    // upstream provider but we need tracking one
-    umf_memory_provider_handle_t hProvider = hPool->provider;
+    // The IPC handler is an instance of the tracking memory provider
+    if (*(uint32_t *)hIPCHandler != UMF_VERSION_CURRENT) {
+        // Temporary hack to verify that the user passes a correct IPC handler,
+        // not a pool handle as was required by the previous version of the API.
+        LOG_ERR("Invalid IPC handler.");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    umf_memory_provider_handle_t hProvider = hIPCHandler;
     void *base = NULL;
 
     umf_result_t ret = umfMemoryProviderOpenIPCHandle(
@@ -153,3 +159,26 @@ umf_result_t umfCloseIPCHandle(void *ptr) {
     return umfMemoryProviderCloseIPCHandle(hProvider, allocInfo.base,
                                            allocInfo.baseSize);
 }
+
+umf_result_t umfPoolGetIPCHandler(umf_memory_pool_handle_t hPool,
+                                  umf_ipc_handler_handle_t *hIPCHandler) {
+    if (hPool == NULL) {
+        LOG_ERR("Pool handle is NULL.");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    if (hIPCHandler == NULL) {
+        LOG_ERR("hIPCHandler is NULL.");
+        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
+    }
+
+    // We cannot use the umfPoolGetMemoryProvider function because it returns
+    // the upstream provider, but we need the tracking one
+    umf_memory_provider_handle_t hProvider = hPool->provider;
+
+    // We use the tracking provider as the IPC handler because
+    // it does the IPC caching.
+    *hIPCHandler = (umf_ipc_handler_handle_t)hProvider;
+
+    return UMF_RESULT_SUCCESS;
+}
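
The implementation above gives callers two checkable failure modes: umfPoolGetIPCHandler rejects NULL arguments, and umfOpenIPCHandle rejects a first argument that does not look like a tracking provider (for example, a pool handle passed by a call site that was not migrated). A small sketch of what a caller can rely on, based only on the checks added in this diff; the function name and the assert-based style are illustrative.

    #include <assert.h>
    #include <stddef.h>
    #include <umf/ipc.h>
    #include <umf/memory_pool.h>

    // `pool` is assumed to be a valid pool handle created elsewhere.
    static void check_ipc_handler_arguments(umf_memory_pool_handle_t pool) {
        umf_ipc_handler_handle_t handler = NULL;
        // A NULL pool or a NULL out-parameter is rejected up front.
        assert(umfPoolGetIPCHandler(NULL, &handler) ==
               UMF_RESULT_ERROR_INVALID_ARGUMENT);
        assert(umfPoolGetIPCHandler(pool, NULL) ==
               UMF_RESULT_ERROR_INVALID_ARGUMENT);
        // A valid pool yields its tracking provider as the IPC handler.
        assert(umfPoolGetIPCHandler(pool, &handler) == UMF_RESULT_SUCCESS);
        assert(handler != NULL);
    }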

src/libumf.def (+1)

@@ -102,6 +102,7 @@ EXPORTS
     umfPoolCreateFromMemspace
     umfPoolDestroy
     umfPoolFree
+    umfPoolGetIPCHandler
     umfPoolGetIPCHandleSize
     umfPoolGetLastAllocationError
     umfPoolGetMemoryProvider

src/libumf.map (+1)

@@ -96,6 +96,7 @@ UMF_1.0 {
     umfPoolCreateFromMemspace;
     umfPoolDestroy;
     umfPoolFree;
+    umfPoolGetIPCHandler;
     umfPoolGetIPCHandleSize;
     umfPoolGetLastAllocationError;
     umfPoolGetMemoryProvider;

test/common/ipc_common.c (+8 -1)

@@ -138,6 +138,13 @@ int run_consumer(int port, umf_memory_pool_ops_t *pool_ops, void *pool_params,
         goto err_umfMemoryProviderDestroy;
     }
 
+    umf_ipc_handler_handle_t ipc_handler;
+    umf_result = umfPoolGetIPCHandler(pool, &ipc_handler);
+    if (umf_result != UMF_RESULT_SUCCESS) {
+        fprintf(stderr, "[consumer] ERROR: get IPC handler failed\n");
+        goto err_umfMemoryPoolDestroy;
+    }
+
     producer_socket = consumer_connect(port);
     if (producer_socket < 0) {
         goto err_umfMemoryPoolDestroy;
@@ -195,7 +202,7 @@ int run_consumer(int port, umf_memory_pool_ops_t *pool_ops, void *pool_params,
             len);
 
     void *SHM_ptr;
-    umf_result = umfOpenIPCHandle(pool, IPC_handle, &SHM_ptr);
+    umf_result = umfOpenIPCHandle(ipc_handler, IPC_handle, &SHM_ptr);
     if (umf_result == UMF_RESULT_ERROR_NOT_SUPPORTED) {
         fprintf(stderr,
                 "[consumer] SKIP: opening the IPC handle is not supported\n");

test/ipcFixtures.hpp (+47 -15)

@@ -207,12 +207,17 @@ TEST_P(umfIpcTest, BasicFlow) {
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
     ASSERT_EQ(handleFullSize, handleHalfSize);
 
+    umf_ipc_handler_handle_t ipcHandler = nullptr;
+    ret = umfPoolGetIPCHandler(pool.get(), &ipcHandler);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ipcHandler, nullptr);
+
     void *fullArray = nullptr;
-    ret = umfOpenIPCHandle(pool.get(), ipcHandleFull, &fullArray);
+    ret = umfOpenIPCHandle(ipcHandler, ipcHandleFull, &fullArray);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
     void *halfArray = nullptr;
-    ret = umfOpenIPCHandle(pool.get(), ipcHandleHalf, &halfArray);
+    ret = umfOpenIPCHandle(ipcHandler, ipcHandleHalf, &halfArray);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
     std::vector<int> actual_data(SIZE);
@@ -276,8 +281,13 @@ TEST_P(umfIpcTest, GetPoolByOpenedHandle) {
 
     for (size_t pool_id = 0; pool_id < NUM_POOLS; pool_id++) {
         void *ptr = nullptr;
+        umf_ipc_handler_handle_t ipcHandler = nullptr;
         ret =
-            umfOpenIPCHandle(pools_to_open[pool_id].get(), ipcHandle, &ptr);
+            umfPoolGetIPCHandler(pools_to_open[pool_id].get(), &ipcHandler);
+        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+        ASSERT_NE(ipcHandler, nullptr);
+
+        ret = umfOpenIPCHandle(ipcHandler, ipcHandle, &ptr);
         ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
         openedPtrs[pool_id][i] = ptr;
     }
@@ -311,16 +321,22 @@
 TEST_P(umfIpcTest, AllocFreeAllocTest) {
     constexpr size_t SIZE = 64 * 1024;
     umf::pool_unique_handle_t pool = makePool();
+    umf_ipc_handler_handle_t ipcHandler = nullptr;
+
+    umf_result_t ret = umfPoolGetIPCHandler(pool.get(), &ipcHandler);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ipcHandler, nullptr);
+
     void *ptr = umfPoolMalloc(pool.get(), SIZE);
     EXPECT_NE(ptr, nullptr);
 
     umf_ipc_handle_t ipcHandle = nullptr;
     size_t handleSize = 0;
-    umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize);
+    ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
     void *opened_ptr = nullptr;
-    ret = umfOpenIPCHandle(pool.get(), ipcHandle, &opened_ptr);
+    ret = umfOpenIPCHandle(ipcHandler, ipcHandle, &opened_ptr);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
     ret = umfCloseIPCHandle(opened_ptr);
@@ -343,7 +359,7 @@ TEST_P(umfIpcTest, AllocFreeAllocTest) {
     ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
-    ret = umfOpenIPCHandle(pool.get(), ipcHandle, &opened_ptr);
+    ret = umfOpenIPCHandle(ipcHandler, ipcHandle, &opened_ptr);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
     ret = umfCloseIPCHandle(opened_ptr);
@@ -362,11 +378,22 @@ TEST_P(umfIpcTest, AllocFreeAllocTest) {
     EXPECT_EQ(stat.openCount, stat.closeCount);
 }
 
-TEST_P(umfIpcTest, openInTwoPools) {
+TEST_P(umfIpcTest, openInTwoIpcHandlers) {
     constexpr size_t SIZE = 100;
    std::vector<int> expected_data(SIZE);
     umf::pool_unique_handle_t pool1 = makePool();
     umf::pool_unique_handle_t pool2 = makePool();
+    umf_ipc_handler_handle_t ipcHandler1 = nullptr;
+    umf_ipc_handler_handle_t ipcHandler2 = nullptr;
+
+    umf_result_t ret = umfPoolGetIPCHandler(pool1.get(), &ipcHandler1);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ipcHandler1, nullptr);
+
+    ret = umfPoolGetIPCHandler(pool2.get(), &ipcHandler2);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ipcHandler2, nullptr);
+
     void *ptr = umfPoolMalloc(pool1.get(), sizeof(expected_data[0]) * SIZE);
     EXPECT_NE(ptr, nullptr);
 
@@ -375,15 +402,15 @@ TEST_P(umfIpcTest, openInTwoPools) {
 
     umf_ipc_handle_t ipcHandle = nullptr;
     size_t handleSize = 0;
-    umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize);
+    ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
     void *openedPtr1 = nullptr;
-    ret = umfOpenIPCHandle(pool1.get(), ipcHandle, &openedPtr1);
+    ret = umfOpenIPCHandle(ipcHandler1, ipcHandle, &openedPtr1);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
     void *openedPtr2 = nullptr;
-    ret = umfOpenIPCHandle(pool2.get(), ipcHandle, &openedPtr2);
+    ret = umfOpenIPCHandle(ipcHandler2, ipcHandle, &openedPtr2);
     ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
 
     ret = umfPutIPCHandle(ipcHandle);
@@ -466,6 +493,7 @@ TEST_P(umfIpcTest, ConcurrentGetPutHandles) {
 }
 
 TEST_P(umfIpcTest, ConcurrentOpenCloseHandles) {
+    umf_result_t ret;
     std::vector<void *> ptrs;
     constexpr size_t ALLOC_SIZE = 100;
     constexpr size_t NUM_POINTERS = 100;
@@ -481,21 +509,25 @@ TEST_P(umfIpcTest, ConcurrentOpenCloseHandles) {
     for (size_t i = 0; i < NUM_POINTERS; ++i) {
         umf_ipc_handle_t ipcHandle;
         size_t handleSize;
-        umf_result_t ret = umfGetIPCHandle(ptrs[i], &ipcHandle, &handleSize);
+        ret = umfGetIPCHandle(ptrs[i], &ipcHandle, &handleSize);
         ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
         ipcHandles[i] = ipcHandle;
     }
 
     std::array<std::vector<void *>, NTHREADS> openedIpcHandles;
+    umf_ipc_handler_handle_t ipcHandler = nullptr;
+    ret = umfPoolGetIPCHandler(pool.get(), &ipcHandler);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ipcHandler, nullptr);
 
     umf_test::syncthreads_barrier syncthreads(NTHREADS);
 
     auto openHandlesFn = [&ipcHandles, &openedIpcHandles, &syncthreads,
-                          &pool](size_t tid) {
+                          ipcHandler](size_t tid) {
         syncthreads();
         for (auto ipcHandle : ipcHandles) {
             void *ptr;
-            umf_result_t ret = umfOpenIPCHandle(pool.get(), ipcHandle, &ptr);
+            umf_result_t ret = umfOpenIPCHandle(ipcHandler, ipcHandle, &ptr);
             ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
             openedIpcHandles[tid].push_back(ptr);
         }
@@ -514,12 +546,12 @@ TEST_P(umfIpcTest, ConcurrentOpenCloseHandles) {
     umf_test::parallel_exec(NTHREADS, closeHandlesFn);
 
     for (auto ipcHandle : ipcHandles) {
-        umf_result_t ret = umfPutIPCHandle(ipcHandle);
+        ret = umfPutIPCHandle(ipcHandle);
         EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
     }
 
     for (void *ptr : ptrs) {
-        umf_result_t ret = umfPoolFree(pool.get(), ptr);
+        ret = umfPoolFree(pool.get(), ptr);
         EXPECT_EQ(ret,
                   get_umf_result_of_free(freeNotSupported, UMF_RESULT_SUCCESS));
     }

test/ipc_negative.cpp (+6 -2)

@@ -47,7 +47,11 @@ TEST_F(IpcNotSupported, OpenIPCHandleNotSupported) {
     // This data doesn't matter, as the ipc call is no-op
     std::array<uint8_t, 128> ipc_data = {};
     void *ptr;
-    auto ret = umfOpenIPCHandle(
-        pool, reinterpret_cast<umf_ipc_handle_t>(&ipc_data), &ptr);
+    umf_ipc_handler_handle_t ipc_handler;
+    auto ret = umfPoolGetIPCHandler(pool, &ipc_handler);
+    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+
+    ret = umfOpenIPCHandle(ipc_handler,
+                           reinterpret_cast<umf_ipc_handle_t>(&ipc_data), &ptr);
     EXPECT_EQ(ret, UMF_RESULT_ERROR_NOT_SUPPORTED);
 }
