Skip to content

Commit 0ae6e12

Browse files
committed
Add IPC tests
1 parent 7da2d6e commit 0ae6e12

File tree

4 files changed

+369
-0
lines changed

4 files changed

+369
-0
lines changed

src/cpp_helpers.hpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,11 @@ template <typename T> umf_memory_provider_ops_t providerOpsBase() {
9191
UMF_ASSIGN_OP(ops, T, purge_lazy, UMF_RESULT_ERROR_UNKNOWN);
9292
UMF_ASSIGN_OP(ops, T, purge_force, UMF_RESULT_ERROR_UNKNOWN);
9393
UMF_ASSIGN_OP(ops, T, get_name, "");
94+
UMF_ASSIGN_OP(ops.ipc, T, get_ipc_handle_size, UMF_RESULT_ERROR_UNKNOWN);
95+
UMF_ASSIGN_OP(ops.ipc, T, get_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
96+
UMF_ASSIGN_OP(ops.ipc, T, put_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
97+
UMF_ASSIGN_OP(ops.ipc, T, open_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
98+
UMF_ASSIGN_OP(ops.ipc, T, close_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
9499
return ops;
95100
}
96101
} // namespace detail

test/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -177,3 +177,7 @@ add_umf_test(NAME base_alloc_linear
177177
add_umf_test(NAME base_alloc_global
178178
SRCS ${BA_SOURCES_FOR_TEST} pools/pool_base_alloc.cpp malloc_compliance_tests.cpp
179179
LIBS umf_utils)
180+
181+
# The IPC test is built only when pool tracking is enabled
# (umfGetIPCHandle presumably relies on the tracker to resolve a pointer
# to its base allocation -- see ipcAPI.cpp; confirm if tracking becomes optional).
if (UMF_ENABLE_POOL_TRACKING)
    add_umf_test(NAME ipc SRCS ipcAPI.cpp)
endif()

test/common/provider.hpp

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,24 @@ typedef struct provider_base_t {
6161
return UMF_RESULT_ERROR_UNKNOWN;
6262
}
6363
const char *get_name() noexcept { return "base"; }
64+
// Default IPC stub: a base provider does not support IPC, so report failure.
umf_result_t get_ipc_handle_size([[maybe_unused]] size_t *size) noexcept {
    return UMF_RESULT_ERROR_UNKNOWN;
}
67+
// Default IPC stub: no handle can be produced by a provider without IPC
// support; derived mock providers override this.
umf_result_t get_ipc_handle([[maybe_unused]] const void *ptr,
                            [[maybe_unused]] size_t size,
                            [[maybe_unused]] void *ipcData) noexcept {
    return UMF_RESULT_ERROR_UNKNOWN;
}
72+
// Default IPC stub: nothing to release; report failure.
umf_result_t put_ipc_handle([[maybe_unused]] void *ipcData) noexcept {
    return UMF_RESULT_ERROR_UNKNOWN;
}
75+
// Default IPC stub: cannot map a handle into this process; report failure.
umf_result_t open_ipc_handle([[maybe_unused]] void *ipcData,
                             [[maybe_unused]] void **ptr) noexcept {
    return UMF_RESULT_ERROR_UNKNOWN;
}
79+
// Default IPC stub: nothing was opened, so nothing to close; report failure.
umf_result_t close_ipc_handle([[maybe_unused]] void *ptr) noexcept {
    return UMF_RESULT_ERROR_UNKNOWN;
}
6482
} provider_base_t;
6583

6684
umf_memory_provider_ops_t BASE_PROVIDER_OPS =

test/ipcAPI.cpp

Lines changed: 342 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,342 @@
1+
// Copyright (C) 2023 Intel Corporation
2+
// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
3+
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
4+
// This file contains tests for UMF pool API
5+
6+
//#include "memoryPool.hpp"
#include "pool.hpp"
#include "provider.hpp"

#include <umf/ipc.h>
#include <umf/memory_pool.h>
#include <umf/pools/pool_proxy.h>

#include <array>
#include <atomic>
#include <cstdlib>
#include <cstring>
#include <mutex>
#include <numeric>
#include <shared_mutex>
#include <thread>
#include <unordered_map>
#include <vector>
23+
24+
struct provider_mock_ipc : public umf_test::provider_base_t {
25+
using allocations_map_type = std::unordered_map<const void *, size_t>;
26+
using allocations_mutex_type = std::shared_mutex;
27+
using allocations_read_lock_type = std::shared_lock<allocations_mutex_type>;
28+
using allocations_write_lock_type =
29+
std::unique_lock<allocations_mutex_type>;
30+
31+
struct ipc_data {
32+
const void *ptr;
33+
size_t size;
34+
};
35+
struct stats {
36+
std::atomic<size_t> getCount;
37+
std::atomic<size_t> putCount;
38+
std::atomic<size_t> openCount;
39+
std::atomic<size_t> closeCount;
40+
41+
stats() : getCount(0), putCount(0), openCount(0), closeCount(0) {}
42+
};
43+
44+
stats *stat = nullptr;
45+
umf_test::provider_malloc helper_prov;
46+
allocations_mutex_type alloc_mutex;
47+
allocations_map_type allocations;
48+
49+
umf_result_t initialize(stats *s) noexcept {
50+
stat = s;
51+
return UMF_RESULT_SUCCESS;
52+
}
53+
enum umf_result_t alloc(size_t size, size_t align, void **ptr) noexcept {
54+
auto ret = helper_prov.alloc(size, align, ptr);
55+
if (ret == UMF_RESULT_SUCCESS) {
56+
allocations_write_lock_type lock(alloc_mutex);
57+
auto [it, res] = allocations.emplace(*ptr, size);
58+
(void)it;
59+
EXPECT_TRUE(res);
60+
}
61+
return ret;
62+
}
63+
enum umf_result_t free(void *ptr, size_t size) noexcept {
64+
allocations_write_lock_type lock(alloc_mutex);
65+
allocations.erase(ptr);
66+
lock.unlock();
67+
auto ret = helper_prov.free(ptr, size);
68+
return ret;
69+
}
70+
const char *get_name() noexcept { return "mock_ipc"; }
71+
enum umf_result_t get_ipc_handle_size(size_t *size) noexcept {
72+
*size = sizeof(ipc_data);
73+
return UMF_RESULT_SUCCESS;
74+
}
75+
enum umf_result_t get_ipc_handle(const void *ptr, size_t size,
76+
void *ipcDataOpaque) noexcept {
77+
++stat->getCount;
78+
ipc_data *ipcData = static_cast<ipc_data *>(ipcDataOpaque);
79+
allocations_read_lock_type lock(alloc_mutex);
80+
auto it = allocations.find(ptr);
81+
if (it == allocations.end()) {
82+
// client tries to get handle for the pointer that does not match
83+
// with any of the base addresses allocated by the instance of
84+
// the memory provider
85+
return UMF_RESULT_ERROR_INVALID_ARGUMENT;
86+
}
87+
(void)size;
88+
ipcData->ptr = ptr;
89+
ipcData->size = it->second; // size of the base allocation
90+
return UMF_RESULT_SUCCESS;
91+
}
92+
enum umf_result_t put_ipc_handle(void *ipcData) noexcept {
93+
++stat->putCount;
94+
(void)ipcData;
95+
return UMF_RESULT_SUCCESS;
96+
}
97+
enum umf_result_t open_ipc_handle(void *ipcDataOpaque,
98+
void **ptr) noexcept {
99+
++stat->openCount;
100+
ipc_data *ipcData = static_cast<ipc_data *>(ipcDataOpaque);
101+
void *mapping = std::malloc(ipcData->size);
102+
if (!mapping) {
103+
return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
104+
}
105+
106+
memcpy(mapping, ipcData->ptr, ipcData->size);
107+
108+
*ptr = mapping;
109+
110+
return UMF_RESULT_SUCCESS;
111+
}
112+
enum umf_result_t close_ipc_handle(void *ptr) noexcept {
113+
++stat->closeCount;
114+
std::free(ptr);
115+
return UMF_RESULT_SUCCESS;
116+
}
117+
};
118+
119+
// Test fixture: builds a proxy pool on top of provider_mock_ipc and exposes
// the shared `stat` counters so tests can verify IPC callback invocations.
struct umfIpcTest : umf_test::test {
    umfIpcTest() : pool(nullptr, nullptr) {}
    void SetUp() override {
        test::SetUp();
        this->pool = makePool();
    }

    void TearDown() override { test::TearDown(); }

    umf::pool_unique_handle_t makePool() {
        // TODO: The function is similar to poolCreateExt function
        // from memoryPool.hpp
        umf_memory_provider_handle_t hProvider;
        umf_memory_pool_handle_t hPool;

        auto ret =
            umfMemoryProviderCreate(&IPC_MOCK_PROVIDER_OPS, &stat, &hProvider);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

        ret = umfPoolCreate(umfProxyPoolOps(), hProvider, nullptr,
                            UMF_POOL_CREATE_FLAG_OWN_PROVIDER, &hPool);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

        // The pool takes ownership of the provider
        // (UMF_POOL_CREATE_FLAG_OWN_PROVIDER), so destroying the pool is
        // enough -- no separate provider destruction is needed here.
        auto poolDestructor = [](umf_memory_pool_handle_t pool) {
            umfPoolDestroy(pool);
        };

        return umf::pool_unique_handle_t(hPool, std::move(poolDestructor));
    }

    using stats_type = typename provider_mock_ipc::stats;
    umf_memory_provider_ops_t IPC_MOCK_PROVIDER_OPS =
        umf::providerMakeCOps<provider_mock_ipc, stats_type>();
    umf::pool_unique_handle_t pool;
    static constexpr int NTHREADS = 10;
    stats_type stat; // shared with the provider via initialize()
};
157+
158+
// End-to-end IPC flow: allocate, get handles for the base pointer and an
// offset pointer, open both, verify the contents, then close/put/free.
TEST_F(umfIpcTest, BasicFlow) {
    constexpr size_t SIZE = 100;
    int *ptr = (int *)umfPoolMalloc(pool.get(), SIZE * sizeof(int));
    EXPECT_NE(ptr, nullptr);

    // Fill with 0..SIZE-1 so opened mappings can be verified element-wise.
    std::iota(ptr, ptr + SIZE, 0);

    umf_ipc_handle_t ipcHandleFull = nullptr;
    size_t handleFullSize = 0;
    umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandleFull, &handleFullSize);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
    // A pointer into the middle of the same allocation must also yield a
    // handle, of the same size as the base-pointer handle.
    umf_ipc_handle_t ipcHandleHalf = nullptr;
    size_t handleHalfSize = 0;
    ret = umfGetIPCHandle(ptr + SIZE / 2, &ipcHandleHalf, &handleHalfSize);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
    ASSERT_EQ(handleFullSize, handleHalfSize);

    void *fullArray = nullptr;
    ret = umfOpenIPCHandle(pool.get(), ipcHandleFull, &fullArray);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);

    void *halfArray = nullptr;
    ret = umfOpenIPCHandle(pool.get(), ipcHandleHalf, &halfArray);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);

    for (int i = 0; i < (int)SIZE; ++i) {
        ASSERT_EQ(reinterpret_cast<int *>(fullArray)[i], i);
    }
    // Close fullArray before reading halfArray
    ret = umfCloseIPCHandle(fullArray);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    // halfArray was opened from a handle at the middle of the allocation,
    // so its element i corresponds to original element i + SIZE / 2.
    for (int i = 0; i < (int)SIZE / 2; ++i) {
        ASSERT_EQ(reinterpret_cast<int *>(halfArray)[i], i + SIZE / 2);
    }
    ret = umfCloseIPCHandle(halfArray);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPutIPCHandle(ipcHandleFull);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPutIPCHandle(ipcHandleHalf);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPoolFree(pool.get(), ptr);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    // Both handles resolve to the same base allocation, so the provider's
    // get_ipc_handle is expected exactly once -- NOTE(review): presumably
    // the IPC layer deduplicates handle requests; confirm against the
    // umfGetIPCHandle implementation.
    EXPECT_EQ(stat.getCount, 1);
    EXPECT_EQ(stat.putCount, stat.getCount);
    // EXPECT_EQ(stat.openCount, 1);
    EXPECT_EQ(stat.closeCount, stat.openCount);
}
210+
211+
// Stress umfGetIPCHandle/umfPutIPCHandle: every thread gets a handle for
// every allocation concurrently, then all handles are put back concurrently.
TEST_F(umfIpcTest, ConcurrentGetPutHandles) {
    constexpr size_t ALLOC_SIZE = 100;
    constexpr size_t NUM_POINTERS = 100;

    std::vector<void *> allocations;
    allocations.reserve(NUM_POINTERS);
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        void *mem = umfPoolMalloc(pool.get(), ALLOC_SIZE);
        EXPECT_NE(mem, nullptr);
        allocations.push_back(mem);
    }

    std::array<std::vector<umf_ipc_handle_t>, NTHREADS> perThreadHandles;

    // TODO: better to wait on the barrier here so that every thread
    // starts at the same point. But std::barrier is available only
    // starting from C++20
    auto acquireHandles = [&perThreadHandles, &allocations](size_t tid) {
        for (void *mem : allocations) {
            umf_ipc_handle_t handle;
            size_t handleSize;
            umf_result_t status = umfGetIPCHandle(mem, &handle, &handleSize);
            ASSERT_EQ(status, UMF_RESULT_SUCCESS);
            perThreadHandles[tid].push_back(handle);
        }
    };

    std::vector<std::thread> workers;
    for (int tid = 0; tid < NTHREADS; tid++) {
        workers.emplace_back(acquireHandles, tid);
    }
    for (auto &worker : workers) {
        worker.join();
    }
    workers.clear();

    // Now release everything, again from all threads at once.
    auto releaseHandles = [&perThreadHandles](size_t tid) {
        for (umf_ipc_handle_t handle : perThreadHandles[tid]) {
            umf_result_t status = umfPutIPCHandle(handle);
            EXPECT_EQ(status, UMF_RESULT_SUCCESS);
        }
    };

    for (int tid = 0; tid < NTHREADS; tid++) {
        workers.emplace_back(releaseHandles, tid);
    }
    for (auto &worker : workers) {
        worker.join();
    }
    workers.clear();

    for (void *mem : allocations) {
        umf_result_t status = umfPoolFree(pool.get(), mem);
        EXPECT_EQ(status, UMF_RESULT_SUCCESS);
    }

    // Each pointer must be queried at least once; concurrent requests for
    // the same pointer may or may not be collapsed, hence the range.
    EXPECT_GE(stat.getCount, NUM_POINTERS);
    EXPECT_LE(stat.getCount, NUM_POINTERS * NTHREADS);
    EXPECT_EQ(stat.putCount, stat.getCount);
}
271+
272+
// Stress umfOpenIPCHandle/umfCloseIPCHandle: handles are obtained serially,
// then opened by all threads concurrently and closed concurrently.
TEST_F(umfIpcTest, ConcurrentOpenCloseHandles) {
    std::vector<void *> ptrs;
    constexpr size_t ALLOC_SIZE = 100;
    constexpr size_t NUM_POINTERS = 100;
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        void *ptr = umfPoolMalloc(pool.get(), ALLOC_SIZE);
        EXPECT_NE(ptr, nullptr);
        ptrs.push_back(ptr);
    }

    // One handle per allocation, obtained on the main thread.
    std::array<umf_ipc_handle_t, NUM_POINTERS> ipcHandles;
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        umf_ipc_handle_t ipcHandle;
        size_t handleSize;
        umf_result_t ret = umfGetIPCHandle(ptrs[i], &ipcHandle, &handleSize);
        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
        ipcHandles[i] = ipcHandle;
    }

    // Each thread records the pointers it opened so it closes its own.
    std::array<std::vector<void *>, NTHREADS> openedIpcHandles;

    auto openHandlesFn = [this, &ipcHandles, &openedIpcHandles](size_t tid) {
        // TODO: better to wait on the barrier here so that every thread
        // starts at the same point. But std::barrier is available only
        // starting from C++20
        for (auto ipcHandle : ipcHandles) {
            void *ptr;
            umf_result_t ret = umfOpenIPCHandle(pool.get(), ipcHandle, &ptr);
            ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
            openedIpcHandles[tid].push_back(ptr);
        }
    };

    std::vector<std::thread> threads;
    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(openHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    auto closeHandlesFn = [&openedIpcHandles](size_t tid) {
        for (void *ptr : openedIpcHandles[tid]) {
            umf_result_t ret = umfCloseIPCHandle(ptr);
            EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
        }
    };

    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(closeHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    // All opens are closed above; now release the handles and the memory.
    for (auto ipcHandle : ipcHandles) {
        umf_result_t ret = umfPutIPCHandle(ipcHandle);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
    }

    for (void *ptr : ptrs) {
        umf_result_t ret = umfPoolFree(pool.get(), ptr);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
    }

    // Every successful open must have been matched by exactly one close.
    EXPECT_EQ(stat.openCount, stat.closeCount);
}

0 commit comments

Comments
 (0)