#include <numa.h>
#include <numaif.h>
+ #include <sched.h>
+ #include "test_helpers.h"
#include <umf/providers/provider_os_memory.h>

static umf_os_memory_provider_params_t UMF_OS_MEMORY_PROVIDER_PARAMS_TEST =
    umfOsMemoryProviderParamsDefault();

std::vector<int> get_available_numa_nodes_numbers() {
-     if (numa_available() == -1 || numa_all_nodes_ptr == nullptr) {
+     if (numa_available() == -1 || numa_all_nodes_ptr == nullptr || numa_num_task_nodes() <= 1) {
        return {-1};
    }
@@ -28,7 +30,35 @@ std::vector<int> get_available_numa_nodes_numbers() {
    return available_numa_nodes_numbers;
}

- struct testNumaNodes : public testing::TestWithParam<int> {
+ std::vector<int> get_available_cpus() {
+     std::vector<int> available_cpus;
+     cpu_set_t *mask = CPU_ALLOC(CPU_SETSIZE);
+     CPU_ZERO(mask);
+
+     int ret = sched_getaffinity(0, sizeof(cpu_set_t), mask);
+     UT_ASSERTeq(ret, 0);
+     // Collect all CPUs that the process is allowed to run on.
+     for (size_t i = 0; i < CPU_SETSIZE; ++i) {
+         if (CPU_ISSET(i, mask)) {
+             available_cpus.emplace_back(i);
+         }
+     }
+     CPU_FREE(mask);
+
+     return available_cpus;
+ }
+
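+ // Note: the scan above is bounded by CPU_SETSIZE (1024 with glibc), so CPUs
+ // with higher indices would not be reported; the dynamically sized CPU_*_S
+ // macros would be needed to cover them.
+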
+ void set_all_available_nodemask_bits(bitmask *nodemask) {
+     UT_ASSERTne(numa_available(), -1);
+     UT_ASSERTne(numa_all_nodes_ptr, nullptr);
+
+     numa_bitmask_clearall(nodemask);
+
+     // Set all NUMA nodes that are available to the task.
+     copy_bitmask_to_bitmask(numa_all_nodes_ptr, nodemask);
+ }
+
+ struct testNuma : testing::Test {
    void SetUp() override {
        if (numa_available() == -1) {
            GTEST_SKIP() << "Test skipped, NUMA not available";
@@ -56,10 +86,19 @@ struct testNumaNodes : public testing::TestWithParam<int> {
        int numa_node;
        int ret = get_mempolicy(&numa_node, nullptr, 0, addr,
                                MPOL_F_NODE | MPOL_F_ADDR);
-         EXPECT_EQ(ret, 0);
+         UT_ASSERTeq(ret, 0);
        return numa_node;
    }

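+     // get_mempolicy() with a null mode pointer and MPOL_F_ADDR retrieves
+     // the nodemask of the policy governing the page that contains addr.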
+     struct bitmask *retrieve_nodemask(void *addr) {
+         struct bitmask *retrieved_nodemask = numa_allocate_nodemask();
+         UT_ASSERTne(retrieved_nodemask, nullptr);
+         int ret = get_mempolicy(nullptr, retrieved_nodemask->maskp,
+                                 retrieved_nodemask->size, addr, MPOL_F_ADDR);
+         UT_ASSERTeq(ret, 0);
+         return retrieved_nodemask;
+     }
+
    void TearDown() override {
        umf_result_t umf_result;
        if (ptr) {
@@ -82,14 +121,21 @@ struct testNumaNodes : public testing::TestWithParam<int> {
    umf_memory_provider_handle_t os_memory_provider = nullptr;
};

+ struct testNumaOnAllNodes : testNuma, testing::WithParamInterface<int> {};
+ struct testNumaOnAllCpus : testNuma, testing::WithParamInterface<int> {};
+
INSTANTIATE_TEST_SUITE_P(
-     testNumaNodesAllocations, testNumaNodes,
+     testNumaNodesAllocations, testNumaOnAllNodes,
    ::testing::ValuesIn(get_available_numa_nodes_numbers()));

+ INSTANTIATE_TEST_SUITE_P(
+     testNumaNodesAllocationsAllCpus, testNumaOnAllCpus,
+     ::testing::ValuesIn(get_available_cpus()));
+
// Test for allocations on numa nodes. This test is executed for all numa
// nodes available on the system. The available nodes are returned as a vector
// by get_available_numa_nodes_numbers() and passed to the test as parameters.
- TEST_P(testNumaNodes, checkNumaNodesAllocations) {
+ TEST_P(testNumaOnAllNodes, checkNumaNodesAllocations) {
    int numa_node_number = GetParam();
    umf_os_memory_provider_params_t os_memory_provider_params =
        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
@@ -111,3 +157,270 @@ TEST_P(testNumaNodes, checkNumaNodesAllocations) {
    int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
    ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
}
+
+ // Test for allocations on numa nodes with mode preferred. It runs for all
+ // available numa nodes obtained from get_available_numa_nodes_numbers().
+ TEST_P(testNumaOnAllNodes, checkModePreferred) {
+     int numa_node_number = GetParam();
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.maxnode = numa_node_number + 1;
+     numa_bitmask_setbit(nodemask, numa_node_number);
+     os_memory_provider_params.nodemask = nodemask->maskp;
+     os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
+     initOsProvider(os_memory_provider_params);
+
+     umf_result_t umf_result;
+     umf_result =
+         umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+     ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+     ASSERT_NE(ptr, nullptr);
+
+     // The memory must be written to before get_mempolicy() can report
+     // the NUMA node the pages were actually allocated on.
+     memset(ptr, 0xFF, alloc_size);
+     int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+     ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+ }
+
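+ // Note: preferred mode is expected to follow MPOL_PREFERRED semantics, so
+ // if the chosen node is out of memory the kernel falls back to another node;
+ // under memory pressure the node check above could therefore fail.
+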
+ // Test for allocation on numa node with mode preferred and an empty nodeset.
+ // For an empty nodeset the memory is allocated on the node of the CPU that
+ // triggered the allocation. This test is executed on all available cpus
+ // on which the process can run.
+ TEST_P(testNumaOnAllCpus, checkModePreferredEmptyNodeset) {
+     int cpu = GetParam();
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
+     initOsProvider(os_memory_provider_params);
+
+     umf_result_t umf_result;
+     umf_result =
+         umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+     ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+     ASSERT_NE(ptr, nullptr);
+
+     cpu_set_t *mask = CPU_ALLOC(CPU_SETSIZE);
+     CPU_ZERO(mask);
+     CPU_SET(cpu, mask);
+     int ret = sched_setaffinity(0, sizeof(cpu_set_t), mask);
+     UT_ASSERTeq(ret, 0);
+
+     int numa_node_number = numa_node_of_cpu(cpu);
+
+     // The memory must be written to before get_mempolicy() can report
+     // the NUMA node the pages were actually allocated on.
+     memset(ptr, 0xFF, alloc_size);
+     int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+     ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+     CPU_FREE(mask);
+ }
+
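+ // Note: this assumes the provider maps memory lazily (as mmap() without
+ // MAP_POPULATE does), so the physical pages are allocated at first touch in
+ // memset(), after the thread has been pinned to the chosen CPU.
+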
+ // Test for allocation on numa node with local mode enabled. The memory is
+ // allocated on the node of the CPU that triggered the allocation.
+ TEST_F(testNuma, checkModeLocal) {
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
+     initOsProvider(os_memory_provider_params);
+
+     umf_result_t umf_result;
+     umf_result =
+         umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+     ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+     ASSERT_NE(ptr, nullptr);
+
+     int cpu = sched_getcpu();
+     int numa_node_number = numa_node_of_cpu(cpu);
+
+     // The memory must be written to before get_mempolicy() can report
+     // the NUMA node the pages were actually allocated on.
+     memset(ptr, 0xFF, alloc_size);
+     int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+     ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+ }
+
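+ // Note: sched_getcpu() and the following memset() are not atomic; if the
+ // scheduler migrates the thread to a CPU on a different node in between,
+ // this check can fail spuriously.
+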
+ // Test for allocation on numa node with default mode enabled.
+ // Since no policy is set via set_mempolicy(), it should fall back to the
+ // system-wide default policy, which allocates pages on the node of the CPU
+ // that triggers the allocation.
+ TEST_F(testNuma, checkModeDefault) {
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     initOsProvider(os_memory_provider_params);
+
+     umf_result_t umf_result;
+     umf_result =
+         umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+     ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+     ASSERT_NE(ptr, nullptr);
+
+     int cpu = sched_getcpu();
+     int numa_node_number = numa_node_of_cpu(cpu);
+
+     // The memory must be written to before get_mempolicy() can report
+     // the NUMA node the pages were actually allocated on.
+     memset(ptr, 0xFF, alloc_size);
+     int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+     ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+ }
+
+ // Test for allocation on numa node with default mode enabled.
+ // Since bind mode is set via set_mempolicy(), allocations should follow
+ // that thread policy.
+ TEST_F(testNuma, checkModeDefaultSetMempolicy) {
+     int numa_node_number = get_available_numa_nodes_numbers()[0];
+     numa_bitmask_setbit(nodemask, numa_node_number);
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     initOsProvider(os_memory_provider_params);
+
+     long ret = set_mempolicy(MPOL_BIND, nodemask->maskp, nodemask->size);
+     ASSERT_EQ(ret, 0);
+
+     umf_result_t umf_result;
+     umf_result =
+         umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+     ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+     ASSERT_NE(ptr, nullptr);
+
+     // The memory must be written to before get_mempolicy() can report
+     // the NUMA node the pages were actually allocated on.
+     memset(ptr, 0xFF, alloc_size);
+     int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+     ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+ }
+
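+ // set_mempolicy(MPOL_BIND, ...) sets the calling thread's policy: with no
+ // explicit provider mode, allocations come only from the nodes in the given
+ // nodemask instead of falling back to other nodes.
+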
+ // Test for allocations on numa nodes with interleave mode enabled. The page
+ // allocations are interleaved across the set of nodes specified in nodemask.
+ TEST_F(testNuma, checkModeInterleave) {
+     constexpr int pages_num = 1024;
+     size_t page_size = sysconf(_SC_PAGE_SIZE);
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.maxnode = numa_max_node() + 1;
+     set_all_available_nodemask_bits(nodemask);
+     os_memory_provider_params.nodemask = nodemask->maskp;
+     os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
+     initOsProvider(os_memory_provider_params);
+
+     umf_result_t umf_result;
+     umf_result = umfMemoryProviderAlloc(os_memory_provider,
+                                         pages_num * page_size, 0, &ptr);
+     ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+     ASSERT_NE(ptr, nullptr);
+
+     // The memory must be written to before get_mempolicy() can report
+     // the NUMA node the pages were actually allocated on.
+     memset(ptr, 0xFF, pages_num * page_size);
+
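+     // Interleave mode is expected to follow MPOL_INTERLEAVE semantics:
+     // pages are assigned to the nodes of the mask round-robin in ascending
+     // node order, so page i should land on numa_nodes_numbers[i % count].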
+     // Check on which node each page was allocated.
+     std::vector<int> numa_nodes_numbers = get_available_numa_nodes_numbers();
+     size_t nodes_count = numa_nodes_numbers.size();
+
+     for (size_t i = 0; i < (size_t)pages_num; i++) {
+         ASSERT_EQ(numa_nodes_numbers[i % nodes_count],
+                   retrieve_numa_node_number((char *)ptr + page_size * i));
+     }
+
+     bitmask *retrieved_nodemask = retrieve_nodemask(ptr);
+     int ret = numa_bitmask_equal(retrieved_nodemask, nodemask);
+     ASSERT_EQ(ret, 1);
+     numa_bitmask_free(retrieved_nodemask);
+ }
+
+ // Test for allocations on a single numa node with interleave mode enabled.
+ TEST_F(testNuma, checkModeInterleaveSingleNode) {
+     constexpr int pages_num = 1024;
+     size_t page_size = sysconf(_SC_PAGE_SIZE);
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.maxnode = numa_max_node() + 1;
+     std::vector<int> numa_nodes_numbers = get_available_numa_nodes_numbers();
+     numa_bitmask_setbit(nodemask, numa_nodes_numbers[0]);
+     os_memory_provider_params.nodemask = nodemask->maskp;
+     os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
+     initOsProvider(os_memory_provider_params);
+
+     umf_result_t umf_result;
+     umf_result = umfMemoryProviderAlloc(os_memory_provider,
+                                         pages_num * page_size, 0, &ptr);
+     ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+     ASSERT_NE(ptr, nullptr);
+
+     // The memory must be written to before get_mempolicy() can report
+     // the NUMA node the pages were actually allocated on.
+     memset(ptr, 0xFF, pages_num * page_size);
+
+     // With a single node in the mask, every page must land on that node.
+     ASSERT_EQ(numa_nodes_numbers[0], retrieve_numa_node_number(ptr));
+ }
+
+ // Negative tests
+
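+ // Each of the following tests passes a provider configuration that violates
+ // the nodemask/maxnode requirements of the selected NUMA mode and expects
+ // umfMemoryProviderCreate() to fail with UMF_RESULT_ERROR_INVALID_ARGUMENT.
+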
+ // Test for allocation on numa node with local mode enabled when maxnode
+ // and nodemask are set. For local mode, maxnode and nodemask must be empty.
+ TEST_F(testNuma, checkModeLocalIllegalArgSet) {
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.maxnode = numa_max_node() + 1;
+     set_all_available_nodemask_bits(nodemask);
+     os_memory_provider_params.nodemask = nodemask->maskp;
+     os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
+     umf_result_t umf_result;
+     umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
+                                          &os_memory_provider_params,
+                                          &os_memory_provider);
+     ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+     ASSERT_EQ(os_memory_provider, nullptr);
+ }
+
+ // Test for allocation on numa node with default mode enabled when maxnode
+ // and nodemask are set. For the default mode, maxnode and nodemask must be
+ // empty.
+ TEST_F(testNuma, checkModeDefaultIllegalArgSet) {
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.maxnode = numa_max_node() + 1;
+     set_all_available_nodemask_bits(nodemask);
+     os_memory_provider_params.nodemask = nodemask->maskp;
+     umf_result_t umf_result;
+     umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
+                                          &os_memory_provider_params,
+                                          &os_memory_provider);
+     ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+     ASSERT_EQ(os_memory_provider, nullptr);
+ }
+
+ // Test for allocation on numa node with bind mode enabled when maxnode
+ // and nodemask are unset. For the bind mode, maxnode and nodemask must be
+ // a non-empty set.
+ TEST_F(testNuma, checkModeBindIllegalArgSet) {
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.numa_mode = UMF_NUMA_MODE_BIND;
+     umf_result_t umf_result;
+     umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
+                                          &os_memory_provider_params,
+                                          &os_memory_provider);
+     ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+     ASSERT_EQ(os_memory_provider, nullptr);
+ }
+
+ // Test for allocation on numa node with interleave mode enabled when maxnode
+ // and nodemask are unset. For the interleave mode, maxnode and nodemask must
+ // be a non-empty set.
+ TEST_F(testNuma, checkModeInterleaveIllegalArgSet) {
+     umf_os_memory_provider_params_t os_memory_provider_params =
+         UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+     os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
+     umf_result_t umf_result;
+     umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
+                                          &os_memory_provider_params,
+                                          &os_memory_provider);
+     ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+     ASSERT_EQ(os_memory_provider, nullptr);
+ }