
Commit 4dc141a

Add IPC tests
1 parent 5aa9f9f commit 4dc141a

File tree:
  src/cpp_helpers.hpp
  test/CMakeLists.txt
  test/common/provider.hpp
  test/ipcAPI.cpp

4 files changed: +362 -0 lines changed

src/cpp_helpers.hpp

Lines changed: 5 additions & 0 deletions
@@ -90,6 +90,11 @@ template <typename T> umf_memory_provider_ops_t providerOpsBase() {
     UMF_ASSIGN_OP(ops, T, get_min_page_size, UMF_RESULT_ERROR_UNKNOWN);
     UMF_ASSIGN_OP(ops, T, purge_lazy, UMF_RESULT_ERROR_UNKNOWN);
     UMF_ASSIGN_OP(ops, T, purge_force, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, get_ipc_handle_size, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, get_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, put_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, open_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, close_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
     UMF_ASSIGN_OP(ops, T, get_name, "");
     return ops;
 }
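For orientation: providerOpsBase() populates the C-level umf_memory_provider_ops_t table from a C++ provider type's member functions, so these five new UMF_ASSIGN_OP entries are what route the IPC callbacks of C++ test providers (such as the mock further below) into the C API. A minimal sketch of the pattern, mirroring the test file's use of umf::providerMakeCOps; the names tiny_params, tiny_ipc_provider, and TINY_IPC_OPS are illustrative, not part of the commit:

// Hedged sketch (not from the commit): a C++ provider that opts into one
// IPC entry point by shadowing the base method by name.
#include "provider.hpp" // umf_test::provider_base_t and, transitively,
                        // umf::providerMakeCOps (assumed include path)

struct tiny_params {};

struct tiny_ipc_provider : public umf_test::provider_base_t {
    umf_result_t initialize(tiny_params *) noexcept {
        return UMF_RESULT_SUCCESS;
    }
    // Shadowing the base method is enough; UMF_ASSIGN_OP binds it into
    // the C ops table built by providerOpsBase().
    umf_result_t get_ipc_handle_size(size_t *size) noexcept {
        *size = sizeof(void *); // toy handle: a single pointer
        return UMF_RESULT_SUCCESS;
    }
    // Methods left un-shadowed keep the base stubs and therefore report
    // UMF_RESULT_ERROR_UNKNOWN through the ops table.
};

static umf_memory_provider_ops_t TINY_IPC_OPS =
    umf::providerMakeCOps<tiny_ipc_provider, tiny_params>();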

test/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
@@ -121,3 +121,7 @@ if(LINUX)
              SRCS test_base_alloc_linear.c
              LIBS umf_utils)
 endif()
+
+if (UMF_ENABLE_POOL_TRACKING)
+    add_umf_test(NAME ipc SRCS ipcAPI.cpp)
+endif()

test/common/provider.hpp

Lines changed: 18 additions & 0 deletions
@@ -60,6 +60,24 @@ typedef struct provider_base_t {
                                   [[maybe_unused]] size_t size) noexcept {
         return UMF_RESULT_ERROR_UNKNOWN;
     }
+    umf_result_t get_ipc_handle_size([[maybe_unused]] size_t *size) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t get_ipc_handle([[maybe_unused]] const void *ptr,
+                                [[maybe_unused]] size_t size,
+                                [[maybe_unused]] void *ipcData) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t put_ipc_handle([[maybe_unused]] void *ipcData) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t open_ipc_handle([[maybe_unused]] void *ipcData,
+                                 [[maybe_unused]] void **ptr) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t close_ipc_handle([[maybe_unused]] void *ptr) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
     const char *get_name() noexcept { return "base"; }
 } provider_base_t;

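These base stubs keep the ops table complete even for providers that never opt into IPC; callers simply get UMF_RESULT_ERROR_UNKNOWN back. A hedged sketch of how that could surface through the provider C API — umfMemoryProviderGetIPCHandleSize is assumed here to be the entry point matching the get_ipc_handle_size ops field, and providerSupportsIpc is a hypothetical helper:

#include <umf/memory_provider.h>

// Hypothetical helper: treat the base stub's UMF_RESULT_ERROR_UNKNOWN as
// "this provider does not implement IPC handles".
static bool providerSupportsIpc(umf_memory_provider_handle_t hProvider) {
    size_t handleSize = 0;
    umf_result_t ret =
        umfMemoryProviderGetIPCHandleSize(hProvider, &handleSize);
    return ret == UMF_RESULT_SUCCESS && handleSize > 0;
}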
test/ipcAPI.cpp

Lines changed: 335 additions & 0 deletions
@@ -0,0 +1,335 @@
// Copyright (C) 2023 Intel Corporation
// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// This file contains tests for UMF pool API

//#include "memoryPool.hpp"
#include "pool.hpp"
#include "provider.hpp"

#include "umf/ipc.h"
#include "umf/memory_pool.h"

#include <array>
#include <atomic>
#include <cstdlib>
#include <mutex>
#include <numeric>
#include <shared_mutex>
#include <thread>
#include <unordered_map>
#include <vector>

struct provider_mock_ipc : public umf_test::provider_base_t {
    using allocations_map_type = std::unordered_map<const void *, size_t>;
    using allocations_mutex_type = std::shared_mutex;
    using allocations_read_lock_type = std::shared_lock<allocations_mutex_type>;
    using allocations_write_lock_type =
        std::unique_lock<allocations_mutex_type>;

    struct ipc_data {
        const void *ptr;
        size_t size;
    };
    struct stats {
        std::atomic<size_t> getCount;
        std::atomic<size_t> putCount;
        std::atomic<size_t> openCount;
        std::atomic<size_t> closeCount;

        stats() : getCount(0), putCount(0), openCount(0), closeCount(0) {}
    };

    stats *stat = nullptr;
    umf_test::provider_malloc helper_prov;
    allocations_mutex_type alloc_mutex;
    allocations_map_type allocations;

    umf_result_t initialize(stats *s) noexcept {
        stat = s;
        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t alloc(size_t size, size_t align, void **ptr) noexcept {
        auto ret = helper_prov.alloc(size, align, ptr);
        if (ret == UMF_RESULT_SUCCESS) {
            allocations_write_lock_type lock(alloc_mutex);
            auto [it, res] = allocations.emplace(*ptr, size);
            (void)it;
            EXPECT_TRUE(res);
        }
        return ret;
    }
    enum umf_result_t free(void *ptr, size_t size) noexcept {
        auto ret = helper_prov.free(ptr, size);
        allocations_write_lock_type lock(alloc_mutex);
        allocations.erase(ptr);
        return ret;
    }
    const char *get_name() noexcept { return "mock_ipc"; }
    enum umf_result_t get_ipc_handle_size(size_t *size) noexcept {
        *size = sizeof(ipc_data);
        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t get_ipc_handle(const void *ptr, size_t size,
                                     void *ipcDataOpaque) noexcept {
        ++stat->getCount;
        ipc_data *ipcData = static_cast<ipc_data *>(ipcDataOpaque);
        allocations_read_lock_type lock(alloc_mutex);
        auto it = allocations.find(ptr);
        if (it == allocations.end()) {
            // client tries to get handle for the pointer that does not match
            // with any of the base addresses allocated by the instance of
            // the memory provider
            return UMF_RESULT_ERROR_INVALID_ARGUMENT;
        }
        (void)size;
        ipcData->ptr = ptr;
        ipcData->size = it->second; // size of the base allocation
        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t put_ipc_handle(void *ipcData) noexcept {
        ++stat->putCount;
        (void)ipcData;
        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t open_ipc_handle(void *ipcDataOpaque,
                                      void **ptr) noexcept {
        ++stat->openCount;
        ipc_data *ipcData = static_cast<ipc_data *>(ipcDataOpaque);
        void *mapping = std::malloc(ipcData->size);
        if (!mapping) {
            return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
        }

        memcpy(mapping, ipcData->ptr, ipcData->size);

        *ptr = mapping;

        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t close_ipc_handle(void *ptr) noexcept {
        ++stat->closeCount;
        std::free(ptr);
        return UMF_RESULT_SUCCESS;
    }
};

struct umfIpcTest : umf_test::test {
    umfIpcTest() : pool(nullptr, nullptr) {}
    void SetUp() override {
        test::SetUp();
        this->pool = makePool();
    }

    void TearDown() override { test::TearDown(); }

    umf::pool_unique_handle_t makePool() {
        // TODO: The function is similar to poolCreateExt function
        // from memoryPool.hpp
        umf_memory_provider_handle_t hProvider;
        umf_memory_pool_handle_t hPool;

        auto ret =
            umfMemoryProviderCreate(&IPC_MOCK_PROVIDER_OPS, &stat, &hProvider);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

        ret = umfPoolCreate(&umf_test::PROXY_POOL_OPS, hProvider, nullptr,
                            UMF_POOL_CREATE_FLAG_OWN_PROVIDER, &hPool);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

        // capture provider and destroy it after the pool is destroyed
        auto poolDestructor = [](umf_memory_pool_handle_t pool) {
            umfPoolDestroy(pool);
        };

        return umf::pool_unique_handle_t(hPool, std::move(poolDestructor));
    }

    using stats_type = typename provider_mock_ipc::stats;
    umf_memory_provider_ops_t IPC_MOCK_PROVIDER_OPS =
        umf::providerMakeCOps<provider_mock_ipc, stats_type>();
    umf::pool_unique_handle_t pool;
    static constexpr int NTHREADS = 10;
    stats_type stat;
};

TEST_F(umfIpcTest, BasicFlow) {
    constexpr size_t SIZE = 100;
    int *ptr = (int *)umfPoolMalloc(pool.get(), SIZE * sizeof(int));
    EXPECT_NE(ptr, nullptr);

    std::iota(ptr, ptr + SIZE, 0);

    umf_ipc_handle_t ipcHandleFull = nullptr;
    size_t handleFullSize = 0;
    umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandleFull, &handleFullSize);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
    umf_ipc_handle_t ipcHandleHalf = nullptr;
    size_t handleHalfSize = 0;
    ret = umfGetIPCHandle(ptr + SIZE / 2, &ipcHandleHalf, &handleHalfSize);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
    ASSERT_EQ(handleFullSize, handleHalfSize);

    void *fullArray = nullptr;
    ret = umfOpenIPCHandle(pool.get(), ipcHandleFull, &fullArray);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);

    void *halfArray = nullptr;
    ret = umfOpenIPCHandle(pool.get(), ipcHandleHalf, &halfArray);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);

    for (int i = 0; i < (int)SIZE; ++i) {
        ASSERT_EQ(reinterpret_cast<int *>(fullArray)[i], i);
    }
    // Close fullArray before reading halfArray
    ret = umfCloseIPCHandle(fullArray);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    for (int i = 0; i < (int)SIZE / 2; ++i) {
        ASSERT_EQ(reinterpret_cast<int *>(halfArray)[i], i + SIZE / 2);
    }
    ret = umfCloseIPCHandle(halfArray);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPutIPCHandle(ipcHandleFull);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPutIPCHandle(ipcHandleHalf);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPoolFree(pool.get(), ptr);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    EXPECT_EQ(stat.getCount, 1);
    EXPECT_EQ(stat.putCount, stat.getCount);
    // EXPECT_EQ(stat.openCount, 1);
    EXPECT_EQ(stat.closeCount, stat.openCount);
}

TEST_F(umfIpcTest, ConcurrentGetPutHandles) {
    std::vector<void *> ptrs;
    constexpr size_t ALLOC_SIZE = 100;
    constexpr size_t NUM_POINTERS = 100;
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        void *ptr = umfPoolMalloc(pool.get(), ALLOC_SIZE);
        EXPECT_NE(ptr, nullptr);
        ptrs.push_back(ptr);
    }

    std::array<std::vector<umf_ipc_handle_t>, NTHREADS> ipcHandles;

    auto getHandlesFn = [&ipcHandles, &ptrs](size_t tid) {
        // TODO: better to wait on the barrier here so that every thread
        // starts at the same point. But std::barrier is available only
        // starting from C++20
        for (void *ptr : ptrs) {
            umf_ipc_handle_t ipcHandle;
            size_t handleSize;
            umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize);
            ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
            ipcHandles[tid].push_back(ipcHandle);
        }
    };

    std::vector<std::thread> threads;
    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(getHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    auto putHandlesFn = [&ipcHandles](size_t tid) {
        for (umf_ipc_handle_t ipcHandle : ipcHandles[tid]) {
            umf_result_t ret = umfPutIPCHandle(ipcHandle);
            EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
        }
    };

    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(putHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    for (void *ptr : ptrs) {
        umf_result_t ret = umfPoolFree(pool.get(), ptr);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
    }

    EXPECT_GE(stat.getCount, NUM_POINTERS);
    EXPECT_LE(stat.getCount, NUM_POINTERS * NTHREADS);
    EXPECT_EQ(stat.putCount, stat.getCount);
}

TEST_F(umfIpcTest, ConcurrentOpenCloseHandles) {
    std::vector<void *> ptrs;
    constexpr size_t ALLOC_SIZE = 100;
    constexpr size_t NUM_POINTERS = 100;
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        void *ptr = umfPoolMalloc(pool.get(), ALLOC_SIZE);
        EXPECT_NE(ptr, nullptr);
        ptrs.push_back(ptr);
    }

    std::array<umf_ipc_handle_t, NUM_POINTERS> ipcHandles;
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        umf_ipc_handle_t ipcHandle;
        size_t handleSize;
        umf_result_t ret = umfGetIPCHandle(ptrs[i], &ipcHandle, &handleSize);
        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
        ipcHandles[i] = ipcHandle;
    }

    std::array<std::vector<void *>, NTHREADS> openedIpcHandles;

    auto openHandlesFn = [this, &ipcHandles, &openedIpcHandles](size_t tid) {
        // TODO: better to wait on the barrier here so that every thread
        // starts at the same point. But std::barrier is available only
        // starting from C++20
        for (auto ipcHandle : ipcHandles) {
            void *ptr;
            umf_result_t ret = umfOpenIPCHandle(pool.get(), ipcHandle, &ptr);
            ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
            openedIpcHandles[tid].push_back(ptr);
        }
    };

    std::vector<std::thread> threads;
    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(openHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    auto closeHandlesFn = [&openedIpcHandles](size_t tid) {
        for (void *ptr : openedIpcHandles[tid]) {
            umf_result_t ret = umfCloseIPCHandle(ptr);
            EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
        }
    };

    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(closeHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    for (void *ptr : ptrs) {
        umf_result_t ret = umfPoolFree(pool.get(), ptr);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
    }

    EXPECT_EQ(stat.openCount, stat.closeCount);
}
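The tests above exercise get/put/open/close entirely within one process; the intended use is cross-process, with the opaque handle bytes carried over an IPC channel. A hedged sketch of that flow over a POSIX pipe — it assumes, as the size-based API suggests, that the handle is a flat blob of handleSize bytes that can be copied verbatim between processes; sendAllocation and receiveAllocation are hypothetical names, and error handling is elided:

#include <umf/ipc.h>
#include <umf/memory_pool.h>

#include <cstdlib>
#include <unistd.h>

// Producer side: allocate from a UMF pool and serialize the IPC handle.
void sendAllocation(int writeFd, umf_memory_pool_handle_t pool) {
    void *ptr = umfPoolMalloc(pool, 1024);
    umf_ipc_handle_t handle = nullptr;
    size_t handleSize = 0;
    umfGetIPCHandle(ptr, &handle, &handleSize);
    write(writeFd, &handleSize, sizeof(handleSize));
    write(writeFd, handle, handleSize); // ship the opaque blob
    umfPutIPCHandle(handle);            // release the producer's reference
}

// Consumer side: rebuild the handle bytes and map them into this process.
void receiveAllocation(int readFd, umf_memory_pool_handle_t pool) {
    size_t handleSize = 0;
    read(readFd, &handleSize, sizeof(handleSize));
    void *blob = std::malloc(handleSize);
    read(readFd, blob, handleSize);
    void *mapped = nullptr;
    umfOpenIPCHandle(pool, static_cast<umf_ipc_handle_t>(blob), &mapped);
    // ... use mapped ...
    umfCloseIPCHandle(mapped);
    std::free(blob);
}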
