Skip to content

Commit 494d30c

Browse files
Damian Duy and lukaszstolarczuk
Damian Duy authored and lukaszstolarczuk committed
Add OS provider tests for different bind modes
1 parent 12abae0 commit 494d30c

File tree

1 file changed

+316
-4
lines changed

1 file changed

+316
-4
lines changed

test/provider_os_memory_multiple_numa_nodes.cpp

Lines changed: 316 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,17 @@
77

88
#include <numa.h>
99
#include <numaif.h>
10+
#include <sched.h>
1011

12+
#include "test_helpers.h"
1113
#include <umf/providers/provider_os_memory.h>
1214

1315
static umf_os_memory_provider_params_t UMF_OS_MEMORY_PROVIDER_PARAMS_TEST =
1416
umfOsMemoryProviderParamsDefault();
1517

1618
std::vector<int> get_available_numa_nodes_numbers() {
17-
if (numa_available() == -1 || numa_all_nodes_ptr == nullptr) {
19+
if (numa_available() == -1 || numa_all_nodes_ptr == nullptr ||
20+
numa_num_task_nodes() <= 1) {
1821
return {-1};
1922
}
2023

@@ -29,7 +32,35 @@ std::vector<int> get_available_numa_nodes_numbers() {
2932
return available_numa_nodes_numbers;
3033
}
3134

32-
struct testNumaNodes : public testing::TestWithParam<int> {
35+
std::vector<int> get_available_cpus() {
36+
std::vector<int> available_cpus;
37+
cpu_set_t *mask = CPU_ALLOC(CPU_SETSIZE);
38+
CPU_ZERO(mask);
39+
40+
int ret = sched_getaffinity(0, sizeof(cpu_set_t), mask);
41+
UT_ASSERTeq(ret, 0);
42+
// Get all available cpus.
43+
for (size_t i = 0; i < CPU_SETSIZE; ++i) {
44+
if (CPU_ISSET(i, mask)) {
45+
available_cpus.emplace_back(i);
46+
}
47+
}
48+
CPU_FREE(mask);
49+
50+
return available_cpus;
51+
}
52+
53+
void set_all_available_nodemask_bits(bitmask *nodemask) {
54+
UT_ASSERTne(numa_available(), -1);
55+
UT_ASSERTne(numa_all_nodes_ptr, nullptr);
56+
57+
numa_bitmask_clearall(nodemask);
58+
59+
// Set all available NUMA nodes numbers.
60+
copy_bitmask_to_bitmask(numa_all_nodes_ptr, nodemask);
61+
}
62+
63+
struct testNuma : testing::Test {
3364
void SetUp() override {
3465
if (numa_available() == -1) {
3566
GTEST_SKIP() << "Test skipped, NUMA not available";
@@ -53,6 +84,15 @@ struct testNumaNodes : public testing::TestWithParam<int> {
5384
ASSERT_NE(os_memory_provider, nullptr);
5485
}
5586

87+
struct bitmask *retrieve_nodemask(void *addr) {
88+
struct bitmask *retrieved_nodemask = numa_allocate_nodemask();
89+
UT_ASSERTne(nodemask, nullptr);
90+
int ret = get_mempolicy(nullptr, retrieved_nodemask->maskp,
91+
nodemask->size, addr, MPOL_F_ADDR);
92+
UT_ASSERTeq(ret, 0);
93+
return retrieved_nodemask;
94+
}
95+
5696
void TearDown() override {
5797
umf_result_t umf_result;
5898
if (ptr) {
@@ -75,14 +115,20 @@ struct testNumaNodes : public testing::TestWithParam<int> {
75115
umf_memory_provider_handle_t os_memory_provider = nullptr;
76116
};
77117

118+
struct testNumaOnAllNodes : testNuma, testing::WithParamInterface<int> {};
119+
struct testNumaOnAllCpus : testNuma, testing::WithParamInterface<int> {};
120+
78121
INSTANTIATE_TEST_SUITE_P(
79-
testNumaNodesAllocations, testNumaNodes,
122+
testNumaNodesAllocations, testNumaOnAllNodes,
80123
::testing::ValuesIn(get_available_numa_nodes_numbers()));
81124

125+
INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocationsAllCpus, testNumaOnAllCpus,
126+
::testing::ValuesIn(get_available_cpus()));
127+
82128
// Test for allocations on numa nodes. This test will be executed for all numa nodes
83129
// available on the system. The available nodes are returned in vector from the
84130
// get_available_numa_nodes_numbers() function and passed to test as parameters.
85-
TEST_P(testNumaNodes, checkNumaNodesAllocations) {
131+
TEST_P(testNumaOnAllNodes, checkNumaNodesAllocations) {
86132
int param = GetParam();
87133
ASSERT_GE(param, 0);
88134
unsigned numa_node_number = param;
@@ -107,3 +153,269 @@ TEST_P(testNumaNodes, checkNumaNodesAllocations) {
107153
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
108154
ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
109155
}
156+
157+
// Test for allocations on numa nodes with mode preferred. It runs for all available
158+
// numa nodes obtained from the get_available_numa_nodes_numbers() function.
159+
TEST_P(testNumaOnAllNodes, checkModePreferred) {
160+
int numa_node_number = GetParam();
161+
umf_os_memory_provider_params_t os_memory_provider_params =
162+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
163+
os_memory_provider_params.maxnode = numa_node_number + 1;
164+
numa_bitmask_setbit(nodemask, numa_node_number);
165+
os_memory_provider_params.nodemask = nodemask->maskp;
166+
os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
167+
initOsProvider(os_memory_provider_params);
168+
169+
umf_result_t umf_result;
170+
umf_result =
171+
umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
172+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
173+
ASSERT_NE(ptr, nullptr);
174+
175+
// This pointer must point to an initialized value before retrieving a number of
176+
// the numa node that the pointer was allocated on (calling get_mempolicy).
177+
memset(ptr, 0xFF, alloc_size);
178+
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
179+
ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
180+
}
181+
182+
// Test for allocation on numa node with mode preferred and an empty nodeset.
183+
// For the empty nodeset the memory is allocated on the node of the CPU that
184+
// triggered the allocation. This test will be executed on all available cpus
185+
// on which the process can run.
186+
TEST_P(testNumaOnAllCpus, checkModePreferredEmptyNodeset) {
187+
int cpu = GetParam();
188+
umf_os_memory_provider_params_t os_memory_provider_params =
189+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
190+
os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
191+
initOsProvider(os_memory_provider_params);
192+
193+
umf_result_t umf_result;
194+
umf_result =
195+
umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
196+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
197+
ASSERT_NE(ptr, nullptr);
198+
199+
cpu_set_t *mask = CPU_ALLOC(CPU_SETSIZE);
200+
CPU_ZERO(mask);
201+
202+
CPU_SET(cpu, mask);
203+
int ret = sched_setaffinity(0, sizeof(cpu_set_t), mask);
204+
UT_ASSERTeq(ret, 0);
205+
206+
int numa_node_number = numa_node_of_cpu(cpu);
207+
208+
// This pointer must point to an initialized value before retrieving a number of
209+
// the numa node that the pointer was allocated on (calling get_mempolicy).
210+
memset(ptr, 0xFF, alloc_size);
211+
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
212+
ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
213+
CPU_FREE(mask);
214+
}
215+
216+
// Test for allocation on numa node with local mode enabled. The memory is
217+
// allocated on the node of the CPU that triggered the allocation.
218+
TEST_F(testNuma, checkModeLocal) {
219+
umf_os_memory_provider_params_t os_memory_provider_params =
220+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
221+
os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
222+
initOsProvider(os_memory_provider_params);
223+
224+
umf_result_t umf_result;
225+
umf_result =
226+
umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
227+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
228+
ASSERT_NE(ptr, nullptr);
229+
230+
int cpu = sched_getcpu();
231+
int numa_node_number = numa_node_of_cpu(cpu);
232+
233+
// This pointer must point to an initialized value before retrieving a number of
234+
// the numa node that the pointer was allocated on (calling get_mempolicy).
235+
memset(ptr, 0xFF, alloc_size);
236+
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
237+
ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
238+
}
239+
240+
// Test for allocation on numa node with default mode enabled.
241+
// Since no policy is set by the set_mempolicy function, it should
242+
// default to the system-wide default policy, which allocates pages
243+
// on the node of the CPU that triggers the allocation.
244+
TEST_F(testNuma, checkModeDefault) {
245+
umf_os_memory_provider_params_t os_memory_provider_params =
246+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
247+
initOsProvider(os_memory_provider_params);
248+
249+
umf_result_t umf_result;
250+
umf_result =
251+
umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
252+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
253+
ASSERT_NE(ptr, nullptr);
254+
255+
int cpu = sched_getcpu();
256+
int numa_node_number = numa_node_of_cpu(cpu);
257+
258+
// This pointer must point to an initialized value before retrieving a number of
259+
// the numa node that the pointer was allocated on (calling get_mempolicy).
260+
memset(ptr, 0xFF, alloc_size);
261+
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
262+
ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
263+
}
264+
265+
// Test for allocation on numa node with default mode enabled.
266+
// Since the bind mode is set by setmempolicy, it should fall back to it.
267+
TEST_F(testNuma, checkModeDefaultSetMempolicy) {
268+
int numa_node_number = get_available_numa_nodes_numbers()[0];
269+
numa_bitmask_setbit(nodemask, numa_node_number);
270+
umf_os_memory_provider_params_t os_memory_provider_params =
271+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
272+
initOsProvider(os_memory_provider_params);
273+
274+
long ret = set_mempolicy(MPOL_BIND, nodemask->maskp, nodemask->size);
275+
ASSERT_EQ(ret, 0);
276+
277+
umf_result_t umf_result;
278+
umf_result =
279+
umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
280+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
281+
ASSERT_NE(ptr, nullptr);
282+
283+
// This pointer must point to an initialized value before retrieving a number of
284+
// the numa node that the pointer was allocated on (calling get_mempolicy).
285+
memset(ptr, 0xFF, alloc_size);
286+
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
287+
ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
288+
}
289+
290+
// Test for allocations on numa nodes with interleave mode enabled.
291+
// The page allocations are interleaved across the set of nodes specified in nodemask.
292+
TEST_F(testNuma, checkModeInterleave) {
293+
constexpr int pages_num = 1024;
294+
size_t page_size = sysconf(_SC_PAGE_SIZE);
295+
umf_os_memory_provider_params_t os_memory_provider_params =
296+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
297+
os_memory_provider_params.maxnode = numa_max_node();
298+
set_all_available_nodemask_bits(nodemask);
299+
os_memory_provider_params.nodemask = nodemask->maskp;
300+
os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
301+
initOsProvider(os_memory_provider_params);
302+
303+
umf_result_t umf_result;
304+
umf_result = umfMemoryProviderAlloc(os_memory_provider,
305+
pages_num * page_size, 0, &ptr);
306+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
307+
ASSERT_NE(ptr, nullptr);
308+
309+
// This pointer must point to an initialized value before retrieving a number of
310+
// the numa node that the pointer was allocated on (calling get_mempolicy).
311+
memset(ptr, 0xFF, pages_num * page_size);
312+
313+
// Test where each page will be allocated.
314+
std::vector<int> numa_nodes_numbers = get_available_numa_nodes_numbers();
315+
size_t index = 0;
316+
317+
for (size_t i = 0; i < (size_t)pages_num; i++) {
318+
if (index == (size_t)numa_nodes_numbers.size()) {
319+
index = 0;
320+
}
321+
ASSERT_EQ(numa_nodes_numbers[index],
322+
getNumaNodeByPtr((char *)ptr + page_size * i));
323+
index++;
324+
}
325+
326+
bitmask *retrieved_nodemask = retrieve_nodemask(ptr);
327+
int ret = numa_bitmask_equal(retrieved_nodemask, nodemask);
328+
ASSERT_EQ(ret, 1);
329+
numa_bitmask_free(retrieved_nodemask);
330+
}
331+
332+
// Test for allocations on a single numa node with interleave mode enabled.
333+
TEST_F(testNuma, checkModeInterleaveSingleNode) {
334+
constexpr int pages_num = 1024;
335+
size_t page_size = sysconf(_SC_PAGE_SIZE);
336+
umf_os_memory_provider_params_t os_memory_provider_params =
337+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
338+
os_memory_provider_params.maxnode = numa_max_node();
339+
std::vector<int> numa_nodes_numbers = get_available_numa_nodes_numbers();
340+
numa_bitmask_setbit(nodemask, numa_nodes_numbers[0]);
341+
os_memory_provider_params.nodemask = nodemask->maskp;
342+
os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
343+
initOsProvider(os_memory_provider_params);
344+
345+
umf_result_t umf_result;
346+
umf_result = umfMemoryProviderAlloc(os_memory_provider,
347+
pages_num * page_size, 0, &ptr);
348+
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
349+
ASSERT_NE(ptr, nullptr);
350+
351+
// This pointer must point to an initialized value before retrieving a number of
352+
// the numa node that the pointer was allocated on (calling get_mempolicy).
353+
memset(ptr, 0xFF, pages_num * page_size);
354+
355+
ASSERT_EQ(numa_nodes_numbers[0], getNumaNodeByPtr(ptr));
356+
}
357+
358+
// Negative tests
359+
360+
// Test for allocation on numa node with local mode enabled when maxnode
361+
// and nodemask are set. For the local mode the maxnode and nodemask must be an empty set.
362+
TEST_F(testNuma, checkModeLocalIllegalArgSet) {
363+
umf_os_memory_provider_params_t os_memory_provider_params =
364+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
365+
os_memory_provider_params.maxnode = numa_max_node();
366+
set_all_available_nodemask_bits(nodemask);
367+
os_memory_provider_params.nodemask = nodemask->maskp;
368+
os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
369+
umf_result_t umf_result;
370+
umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
371+
&os_memory_provider_params,
372+
&os_memory_provider);
373+
ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
374+
ASSERT_EQ(os_memory_provider, nullptr);
375+
}
376+
377+
// Test for allocation on numa node with default mode enabled when maxnode
378+
// and nodemask are set. For the default mode the maxnode and nodemask must be an empty set.
379+
TEST_F(testNuma, checkModeDefaultIllegalArgSet) {
380+
umf_os_memory_provider_params_t os_memory_provider_params =
381+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
382+
os_memory_provider_params.maxnode = numa_max_node();
383+
set_all_available_nodemask_bits(nodemask);
384+
os_memory_provider_params.nodemask = nodemask->maskp;
385+
umf_result_t umf_result;
386+
umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
387+
&os_memory_provider_params,
388+
&os_memory_provider);
389+
ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
390+
ASSERT_EQ(os_memory_provider, nullptr);
391+
}
392+
393+
// Test for allocation on numa node with bind mode enabled when maxnode
394+
// and nodemask are unset. For the bind mode the maxnode and nodemask
395+
// must be a non-empty set.
396+
TEST_F(testNuma, checkModeBindIllegalArgSet) {
397+
umf_os_memory_provider_params_t os_memory_provider_params =
398+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
399+
os_memory_provider_params.numa_mode = UMF_NUMA_MODE_BIND;
400+
umf_result_t umf_result;
401+
umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
402+
&os_memory_provider_params,
403+
&os_memory_provider);
404+
ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
405+
ASSERT_EQ(os_memory_provider, nullptr);
406+
}
407+
408+
// Test for allocation on numa node with interleave mode enabled when maxnode
409+
// and nodemask are unset. For the interleave mode the maxnode and nodemask
410+
// must be a non-empty set.
411+
TEST_F(testNuma, checkModeInterleaveIllegalArgSet) {
412+
umf_os_memory_provider_params_t os_memory_provider_params =
413+
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
414+
os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
415+
umf_result_t umf_result;
416+
umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
417+
&os_memory_provider_params,
418+
&os_memory_provider);
419+
ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
420+
ASSERT_EQ(os_memory_provider, nullptr);
421+
}

0 commit comments

Comments (0)