Commit 6789c2c
Add IPC tests
1 parent c02f556

4 files changed, 362 insertions(+), 0 deletions(-)

src/common/umf_helpers.hpp (5 additions, 0 deletions)

@@ -90,6 +90,11 @@ template <typename T> umf_memory_provider_ops_t providerOpsBase() {
     UMF_ASSIGN_OP(ops, T, get_min_page_size, UMF_RESULT_ERROR_UNKNOWN);
     UMF_ASSIGN_OP(ops, T, purge_lazy, UMF_RESULT_ERROR_UNKNOWN);
     UMF_ASSIGN_OP(ops, T, purge_force, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, get_ipc_handle_size, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, get_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, put_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, open_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
+    UMF_ASSIGN_OP(ops, T, close_ipc_handle, UMF_RESULT_ERROR_UNKNOWN);
     UMF_ASSIGN_OP(ops, T, get_name, "");
     return ops;
 }
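Each UMF_ASSIGN_OP line bridges one member function of the C++ provider type T into the matching slot of the C ops table, with the last argument serving as the fallback result. An illustrative sketch of that bridging for one of the new entries (an assumed shape for exposition, not the actual macro body):

    // Assumed expansion shape for
    // UMF_ASSIGN_OP(ops, T, get_ipc_handle_size, UMF_RESULT_ERROR_UNKNOWN):
    // forward the C callback to the C++ member and return the supplied
    // fallback if the call throws.
    ops.get_ipc_handle_size = [](void *provider, size_t *size) -> umf_result_t {
        try {
            return static_cast<T *>(provider)->get_ipc_handle_size(size);
        } catch (...) {
            return UMF_RESULT_ERROR_UNKNOWN;
        }
    };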

test/CMakeLists.txt (3 additions, 0 deletions)

@@ -72,3 +72,6 @@ add_umf_test(NAME memory_pool_internal SRCS memory_pool_internal.cpp)
 if(LINUX) # OS-specific functions are implemented only for Linux now
     add_umf_test(NAME provider_os_memory SRCS provider_os_memory.cpp LIBS numa)
 endif()
+if (UMF_ENABLE_POOL_TRACKING)
+    add_umf_test(NAME ipc SRCS ipcAPI.cpp)
+endif()
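The new suite is gated on UMF_ENABLE_POOL_TRACKING, presumably because umfGetIPCHandle has to map an arbitrary pointer back to the pool that owns it, which is the tracking layer's job. Assuming add_umf_test registers the binary with CTest, the suite can then be run from the build tree with ctest -R ipc.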

test/common/provider.hpp (18 additions, 0 deletions)

@@ -59,6 +59,24 @@ typedef struct provider_base_t {
                                 [[maybe_unused]] size_t size) noexcept {
         return UMF_RESULT_ERROR_UNKNOWN;
     }
+    umf_result_t get_ipc_handle_size([[maybe_unused]] size_t *size) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t get_ipc_handle([[maybe_unused]] const void *ptr,
+                                [[maybe_unused]] size_t size,
+                                [[maybe_unused]] void *ipcData) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t put_ipc_handle([[maybe_unused]] void *ipcData) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t open_ipc_handle([[maybe_unused]] void *ipcData,
+                                 [[maybe_unused]] void **ptr) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
+    umf_result_t close_ipc_handle([[maybe_unused]] void *ptr) noexcept {
+        return UMF_RESULT_ERROR_UNKNOWN;
+    }
     const char *get_name() noexcept { return "base"; }
 } provider_base_t;
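These base-class stubs let a test provider implement only the operations it actually exercises; everything else reports UMF_RESULT_ERROR_UNKNOWN instead of failing to build. A minimal sketch (hypothetical provider name):

    // Only the name is customized; every IPC entry point falls back to the
    // UMF_RESULT_ERROR_UNKNOWN stubs inherited from provider_base_t.
    struct provider_no_ipc : public umf_test::provider_base_t {
        const char *get_name() noexcept { return "no_ipc"; }
    };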

test/ipcAPI.cpp (new file, 336 additions)

// Copyright (C) 2023 Intel Corporation
// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// This file contains tests for the UMF IPC API

//#include "memoryPool.hpp"
#include "pool.hpp"
#include "provider.hpp"

#include "umf/ipc.h"
#include "umf/memory_pool.h"

#include <array>
#include <atomic>
#include <cstdlib>
#include <cstring> // for memcpy in the mock provider
#include <mutex>
#include <numeric>
#include <shared_mutex>
#include <thread>
#include <unordered_map>
#include <vector>
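// The mock provider below fakes cross-process IPC within a single process:
// get_ipc_handle packs the allocation's base pointer and size into ipc_data,
// and open_ipc_handle "maps" that handle by malloc'ing a fresh buffer and
// memcpy'ing the original contents into it. A map of live allocations,
// guarded by a shared_mutex, lets get_ipc_handle reject pointers this
// provider never returned, and a shared stats block counts every IPC call
// so the tests can assert on provider-level behavior.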
23+
struct provider_mock_ipc : public umf_test::provider_base_t {
    using allocations_map_type = std::unordered_map<const void *, size_t>;
    using allocations_mutex_type = std::shared_mutex;
    using allocations_read_lock_type = std::shared_lock<allocations_mutex_type>;
    using allocations_write_lock_type =
        std::unique_lock<allocations_mutex_type>;

    struct ipc_data {
        const void *ptr;
        size_t size;
    };
    struct stats {
        std::atomic<size_t> getCount;
        std::atomic<size_t> putCount;
        std::atomic<size_t> openCount;
        std::atomic<size_t> closeCount;

        stats() : getCount(0), putCount(0), openCount(0), closeCount(0) {}
    };

    stats *stat = nullptr;
    umf_test::provider_malloc helper_prov;
    allocations_mutex_type alloc_mutex;
    allocations_map_type allocations;

    umf_result_t initialize(stats *s) noexcept {
        stat = s;
        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t alloc(size_t size, size_t align, void **ptr) noexcept {
        auto ret = helper_prov.alloc(size, align, ptr);
        if (ret == UMF_RESULT_SUCCESS) {
            allocations_write_lock_type lock(alloc_mutex);
            auto [it, res] = allocations.emplace(*ptr, size);
            (void)it;
            EXPECT_TRUE(res);
        }
        return ret;
    }
    enum umf_result_t free(void *ptr, size_t size) noexcept {
        auto ret = helper_prov.free(ptr, size);
        allocations_write_lock_type lock(alloc_mutex);
        allocations.erase(ptr);
        return ret;
    }
    const char *get_name() noexcept { return "mock_ipc"; }
    enum umf_result_t get_ipc_handle_size(size_t *size) noexcept {
        *size = sizeof(ipc_data);
        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t get_ipc_handle(const void *ptr, size_t size,
                                     void *ipcDataOpaque) noexcept {
        ++stat->getCount;
        ipc_data *ipcData = static_cast<ipc_data *>(ipcDataOpaque);
        allocations_read_lock_type lock(alloc_mutex);
        auto it = allocations.find(ptr);
        if (it == allocations.end()) {
            // The client asked for a handle for a pointer that does not
            // match any base address allocated by this instance of the
            // memory provider.
            return UMF_RESULT_ERROR_INVALID_ARGUMENT;
        }
        (void)size;
        ipcData->ptr = ptr;
        ipcData->size = it->second; // size of the base allocation
        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t put_ipc_handle(void *ipcData) noexcept {
        ++stat->putCount;
        (void)ipcData;
        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t open_ipc_handle(void *ipcDataOpaque,
                                      void **ptr) noexcept {
        ++stat->openCount;
        ipc_data *ipcData = static_cast<ipc_data *>(ipcDataOpaque);
        void *mapping = std::malloc(ipcData->size);
        if (!mapping) {
            return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
        }

        memcpy(mapping, ipcData->ptr, ipcData->size);

        *ptr = mapping;

        return UMF_RESULT_SUCCESS;
    }
    enum umf_result_t close_ipc_handle(void *ptr) noexcept {
        ++stat->closeCount;
        std::free(ptr);
        return UMF_RESULT_SUCCESS;
    }
};
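// Test fixture: builds a proxy pool on top of the mock IPC provider and
// hands the provider a pointer to the fixture-owned stats block, so each
// test can assert on how often the provider's IPC callbacks actually ran.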
117+
struct umfIpcTest : umf_test::test {
    umfIpcTest() : pool(nullptr, nullptr) {}
    void SetUp() override {
        test::SetUp();
        this->pool = makePool();
    }

    void TearDown() override { test::TearDown(); }

    umf::pool_unique_handle_t makePool() {
        // TODO: The function is similar to the poolCreateExt function
        // from memoryPool.hpp
        umf_memory_provider_handle_t hProvider;
        umf_memory_pool_handle_t hPool;

        auto ret =
            umfMemoryProviderCreate(&IPC_MOCK_PROVIDER_OPS, &stat, &hProvider);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

        ret = umfPoolCreate(&umf_test::PROXY_POOL_OPS, hProvider, nullptr,
                            &hPool);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

        // capture provider and destroy it after the pool is destroyed
        auto poolDestructor = [hProvider](umf_memory_pool_handle_t pool) {
            umfPoolDestroy(pool);
            umfMemoryProviderDestroy(hProvider);
        };

        return umf::pool_unique_handle_t(hPool, std::move(poolDestructor));
    }

    using stats_type = typename provider_mock_ipc::stats;
    umf_memory_provider_ops_t IPC_MOCK_PROVIDER_OPS =
        umf::providerMakeCOps<provider_mock_ipc, stats_type>();
    umf::pool_unique_handle_t pool;
    static constexpr int NTHREADS = 10;
    stats_type stat;
};
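
// Round-trip test for a single allocation: obtain handles for the base
// pointer and for an offset into the same allocation, open and validate
// both mappings, then close, put, and free. Note the final counters: the
// provider sees a single get even though umfGetIPCHandle is called twice,
// since both pointers resolve to the same base allocation.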
TEST_F(umfIpcTest, BasicFlow) {
    constexpr size_t SIZE = 100;
    int *ptr = (int *)umfPoolMalloc(pool.get(), SIZE * sizeof(int));
    EXPECT_NE(ptr, nullptr);

    std::iota(ptr, ptr + SIZE, 0);

    umf_ipc_handle_t ipcHandleFull = nullptr;
    size_t handleFullSize = 0;
    umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandleFull, &handleFullSize);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
    umf_ipc_handle_t ipcHandleHalf = nullptr;
    size_t handleHalfSize = 0;
    ret = umfGetIPCHandle(ptr + SIZE / 2, &ipcHandleHalf, &handleHalfSize);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
    ASSERT_EQ(handleFullSize, handleHalfSize);

    void *fullArray = nullptr;
    ret = umfOpenIPCHandle(pool.get(), ipcHandleFull, &fullArray);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);

    void *halfArray = nullptr;
    ret = umfOpenIPCHandle(pool.get(), ipcHandleHalf, &halfArray);
    ASSERT_EQ(ret, UMF_RESULT_SUCCESS);

    for (int i = 0; i < (int)SIZE; ++i) {
        ASSERT_EQ(reinterpret_cast<int *>(fullArray)[i], i);
    }
    // Close fullArray before reading halfArray
    ret = umfCloseIPCHandle(fullArray);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    for (int i = 0; i < (int)SIZE / 2; ++i) {
        ASSERT_EQ(reinterpret_cast<int *>(halfArray)[i], i + SIZE / 2);
    }
    ret = umfCloseIPCHandle(halfArray);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPutIPCHandle(ipcHandleFull);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPutIPCHandle(ipcHandleHalf);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    ret = umfPoolFree(pool.get(), ptr);
    EXPECT_EQ(ret, UMF_RESULT_SUCCESS);

    EXPECT_EQ(stat.getCount, 1);
    EXPECT_EQ(stat.putCount, stat.getCount);
    // EXPECT_EQ(stat.openCount, 1);
    EXPECT_EQ(stat.closeCount, stat.openCount);
}
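
// NTHREADS threads concurrently get, then put, handles for the same set of
// allocations. The bounds below leave room for the IPC layer to coalesce
// concurrent gets for one pointer: anywhere between one get per pointer and
// one get per pointer per thread is acceptable, as long as every get is
// balanced by a put.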
TEST_F(umfIpcTest, ConcurrentGetPutHandles) {
    std::vector<void *> ptrs;
    constexpr size_t ALLOC_SIZE = 100;
    constexpr size_t NUM_POINTERS = 100;
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        void *ptr = umfPoolMalloc(pool.get(), ALLOC_SIZE);
        EXPECT_NE(ptr, nullptr);
        ptrs.push_back(ptr);
    }

    std::array<std::vector<umf_ipc_handle_t>, NTHREADS> ipcHandles;

    auto getHandlesFn = [&ipcHandles, &ptrs](size_t tid) {
        // TODO: better to wait on a barrier here so that every thread
        // starts at the same point, but std::barrier is only available
        // starting with C++20.
        for (void *ptr : ptrs) {
            umf_ipc_handle_t ipcHandle;
            size_t handleSize;
            umf_result_t ret = umfGetIPCHandle(ptr, &ipcHandle, &handleSize);
            ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
            ipcHandles[tid].push_back(ipcHandle);
        }
    };

    std::vector<std::thread> threads;
    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(getHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    auto putHandlesFn = [&ipcHandles](size_t tid) {
        for (umf_ipc_handle_t ipcHandle : ipcHandles[tid]) {
            umf_result_t ret = umfPutIPCHandle(ipcHandle);
            EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
        }
    };

    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(putHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    for (void *ptr : ptrs) {
        umf_result_t ret = umfPoolFree(pool.get(), ptr);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
    }

    EXPECT_GE(stat.getCount, NUM_POINTERS);
    EXPECT_LE(stat.getCount, NUM_POINTERS * NTHREADS);
    EXPECT_EQ(stat.putCount, stat.getCount);
}
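
// Same pattern for open/close: every thread opens every handle and later
// closes every mapping it obtained, so the provider must see exactly as
// many closes as opens.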
TEST_F(umfIpcTest, ConcurrentOpenCloseHandles) {
    std::vector<void *> ptrs;
    constexpr size_t ALLOC_SIZE = 100;
    constexpr size_t NUM_POINTERS = 100;
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        void *ptr = umfPoolMalloc(pool.get(), ALLOC_SIZE);
        EXPECT_NE(ptr, nullptr);
        ptrs.push_back(ptr);
    }

    std::array<umf_ipc_handle_t, NUM_POINTERS> ipcHandles;
    for (size_t i = 0; i < NUM_POINTERS; ++i) {
        umf_ipc_handle_t ipcHandle;
        size_t handleSize;
        umf_result_t ret = umfGetIPCHandle(ptrs[i], &ipcHandle, &handleSize);
        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
        ipcHandles[i] = ipcHandle;
    }

    std::array<std::vector<void *>, NTHREADS> openedIpcHandles;

    auto openHandlesFn = [this, &ipcHandles, &openedIpcHandles](size_t tid) {
        // TODO: better to wait on a barrier here so that every thread
        // starts at the same point, but std::barrier is only available
        // starting with C++20.
        for (auto ipcHandle : ipcHandles) {
            void *ptr;
            umf_result_t ret = umfOpenIPCHandle(pool.get(), ipcHandle, &ptr);
            ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
            openedIpcHandles[tid].push_back(ptr);
        }
    };

    std::vector<std::thread> threads;
    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(openHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    auto closeHandlesFn = [&openedIpcHandles](size_t tid) {
        for (void *ptr : openedIpcHandles[tid]) {
            umf_result_t ret = umfCloseIPCHandle(ptr);
            EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
        }
    };

    for (int i = 0; i < NTHREADS; i++) {
        threads.emplace_back(closeHandlesFn, i);
    }

    for (auto &thread : threads) {
        thread.join();
    }
    threads.clear();

    for (void *ptr : ptrs) {
        umf_result_t ret = umfPoolFree(pool.get(), ptr);
        EXPECT_EQ(ret, UMF_RESULT_SUCCESS);
    }

    EXPECT_EQ(stat.openCount, stat.closeCount);
}
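
Taken together, the tests pin down the flow a real producer process would follow: allocate, get a handle, hand the handle bytes to another process, then put and free. A rough sketch of that producer side (a hypothetical helper under the same model the tests use, treating umf_ipc_handle_t as an opaque blob of the size reported by umfGetIPCHandle; the transport is an ordinary POSIX file descriptor):

    #include <unistd.h> // write()

    #include "umf/ipc.h"
    #include "umf/memory_pool.h"

    // Hypothetical producer-side helper: allocate from a UMF pool, obtain an
    // IPC handle for the buffer, and ship the handle bytes over a pipe or
    // socket fd for a consumer to pass to umfOpenIPCHandle() on its side.
    umf_result_t share_buffer(umf_memory_pool_handle_t pool, int fd) {
        void *buf = umfPoolMalloc(pool, 4096);
        if (!buf) {
            return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
        }

        umf_ipc_handle_t handle = nullptr;
        size_t handleSize = 0;
        umf_result_t ret = umfGetIPCHandle(buf, &handle, &handleSize);
        if (ret == UMF_RESULT_SUCCESS) {
            write(fd, handle, handleSize); // transport is up to the caller
            umfPutIPCHandle(handle);       // drop the producer's reference
        }

        umfPoolFree(pool, buf);
        return ret;
    }

In real use the producer would keep buf alive until the consumer has finished with its mapping; the immediate free here just mirrors the tests' cleanup order against the copying mock.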
