6
6
7
7
#include < numa.h>
8
8
#include < numaif.h>
9
+ #include < sched.h>
9
10
11
+ #include " test_helpers.h"
10
12
#include < umf/providers/provider_os_memory.h>
11
13
12
14
// Baseline provider parameters shared by every test in this file; each test
// copies this value and customizes the fields it needs before creating a
// provider.
static umf_os_memory_provider_params_t UMF_OS_MEMORY_PROVIDER_PARAMS_TEST =
    umfOsMemoryProviderParamsDefault();
14
16
15
17
std::vector<int > get_available_numa_nodes_numbers () {
16
- if (numa_available () == -1 || numa_all_nodes_ptr == nullptr ) {
18
+ if (numa_available () == -1 || numa_all_nodes_ptr == nullptr ||
19
+ numa_num_task_nodes () <= 1 ) {
17
20
return {-1 };
18
21
}
19
22
@@ -28,7 +31,35 @@ std::vector<int> get_available_numa_nodes_numbers() {
28
31
return available_numa_nodes_numbers;
29
32
}
30
33
31
- struct testNumaNodes : public testing ::TestWithParam<int > {
34
+ std::vector<int > get_available_cpus () {
35
+ std::vector<int > available_cpus;
36
+ cpu_set_t *mask = CPU_ALLOC (CPU_SETSIZE);
37
+ CPU_ZERO (mask);
38
+
39
+ int ret = sched_getaffinity (0 , sizeof (cpu_set_t ), mask);
40
+ UT_ASSERTeq (ret, 0 );
41
+ // Get all available cpus.
42
+ for (size_t i = 0 ; i < CPU_SETSIZE; ++i) {
43
+ if (CPU_ISSET (i, mask)) {
44
+ available_cpus.emplace_back (i);
45
+ }
46
+ }
47
+ CPU_FREE (mask);
48
+
49
+ return available_cpus;
50
+ }
51
+
52
// Fill *nodemask with the set of all NUMA nodes available on this system.
// Requires libnuma to be usable (numa_available() succeeded) and
// numa_all_nodes_ptr to be populated.
void set_all_available_nodemask_bits(bitmask *nodemask) {
    UT_ASSERTne(numa_available(), -1);
    UT_ASSERTne(numa_all_nodes_ptr, nullptr);

    // Start from an empty mask ...
    numa_bitmask_clearall(nodemask);

    // ... then mirror libnuma's mask of all available NUMA nodes into it.
    copy_bitmask_to_bitmask(numa_all_nodes_ptr, nodemask);
}
61
+
62
+ struct testNuma : testing::Test {
32
63
void SetUp () override {
33
64
if (numa_available () == -1 ) {
34
65
GTEST_SKIP () << " Test skipped, NUMA not available" ;
@@ -56,10 +87,19 @@ struct testNumaNodes : public testing::TestWithParam<int> {
56
87
int numa_node;
57
88
int ret = get_mempolicy (&numa_node, nullptr , 0 , addr,
58
89
MPOL_F_NODE | MPOL_F_ADDR);
59
- EXPECT_EQ (ret, 0 );
90
+ UT_ASSERTeq (ret, 0 );
60
91
return numa_node;
61
92
}
62
93
94
+ struct bitmask *retrieve_nodemask (void *addr) {
95
+ struct bitmask *retrieved_nodemask = numa_allocate_nodemask ();
96
+ UT_ASSERTne (nodemask, nullptr );
97
+ int ret = get_mempolicy (nullptr , retrieved_nodemask->maskp ,
98
+ nodemask->size , addr, MPOL_F_ADDR);
99
+ UT_ASSERTeq (ret, 0 );
100
+ return retrieved_nodemask;
101
+ }
102
+
63
103
void TearDown () override {
64
104
umf_result_t umf_result;
65
105
if (ptr) {
@@ -82,14 +122,20 @@ struct testNumaNodes : public testing::TestWithParam<int> {
82
122
umf_memory_provider_handle_t os_memory_provider = nullptr ;
83
123
};
84
124
125
+ struct testNumaOnAllNodes : testNuma, testing::WithParamInterface<int > {};
126
+ struct testNumaOnAllCpus : testNuma, testing::WithParamInterface<int > {};
127
+
85
128
// Instantiate the per-node suite with every NUMA node number available on
// this system (see get_available_numa_nodes_numbers()).
INSTANTIATE_TEST_SUITE_P(
    testNumaNodesAllocations, testNumaOnAllNodes,
    ::testing::ValuesIn(get_available_numa_nodes_numbers()));

// Instantiate the per-CPU suite with every CPU this process can run on.
INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocationsAllCpus, testNumaOnAllCpus,
                         ::testing::ValuesIn(get_available_cpus()));
134
+
89
135
// Test for allocations on numa nodes. This test will be executed for all numa nodes
90
136
// available on the system. The available nodes are returned in vector from the
91
137
// get_available_numa_nodes_numbers() function and passed to test as parameters.
92
- TEST_P (testNumaNodes , checkNumaNodesAllocations) {
138
+ TEST_P (testNumaOnAllNodes , checkNumaNodesAllocations) {
93
139
int numa_node_number = GetParam ();
94
140
umf_os_memory_provider_params_t os_memory_provider_params =
95
141
UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
@@ -111,3 +157,269 @@ TEST_P(testNumaNodes, checkNumaNodesAllocations) {
111
157
int retrieved_numa_node_number = retrieve_numa_node_number (ptr);
112
158
ASSERT_EQ (retrieved_numa_node_number, numa_node_number);
113
159
}
160
+
161
+ // Test for allocations on numa nodes with mode preferred. It runs for all available
162
+ // numa nodes obtained from the get_available_numa_nodes_numbers() function.
163
+ TEST_P (testNumaOnAllNodes, checkModePreferred) {
164
+ int numa_node_number = GetParam ();
165
+ umf_os_memory_provider_params_t os_memory_provider_params =
166
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
167
+ os_memory_provider_params.maxnode = numa_node_number + 1 ;
168
+ numa_bitmask_setbit (nodemask, numa_node_number);
169
+ os_memory_provider_params.nodemask = nodemask->maskp ;
170
+ os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
171
+ initOsProvider (os_memory_provider_params);
172
+
173
+ umf_result_t umf_result;
174
+ umf_result =
175
+ umfMemoryProviderAlloc (os_memory_provider, alloc_size, 0 , &ptr);
176
+ ASSERT_EQ (umf_result, UMF_RESULT_SUCCESS);
177
+ ASSERT_NE (ptr, nullptr );
178
+
179
+ // This pointer must point to an initialized value before retrieving a number of
180
+ // the numa node that the pointer was allocated on (calling get_mempolicy).
181
+ memset (ptr, 0xFF , alloc_size);
182
+ int retrieved_numa_node_number = retrieve_numa_node_number (ptr);
183
+ ASSERT_EQ (retrieved_numa_node_number, numa_node_number);
184
+ }
185
+
186
+ // Test for allocation on numa node with mode preferred and an empty nodeset.
187
+ // For the empty nodeset the memory is allocated on the node of the CPU that
188
+ // triggered the allocation. This test will be executed on all available cpus
189
+ // on which the process can run.
190
+ TEST_P (testNumaOnAllCpus, checkModePreferredEmptyNodeset) {
191
+ int cpu = GetParam ();
192
+ umf_os_memory_provider_params_t os_memory_provider_params =
193
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
194
+ os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
195
+ initOsProvider (os_memory_provider_params);
196
+
197
+ umf_result_t umf_result;
198
+ umf_result =
199
+ umfMemoryProviderAlloc (os_memory_provider, alloc_size, 0 , &ptr);
200
+ ASSERT_EQ (umf_result, UMF_RESULT_SUCCESS);
201
+ ASSERT_NE (ptr, nullptr );
202
+
203
+ cpu_set_t *mask = CPU_ALLOC (CPU_SETSIZE);
204
+ CPU_ZERO (mask);
205
+
206
+ CPU_SET (cpu, mask);
207
+ int ret = sched_setaffinity (0 , sizeof (cpu_set_t ), mask);
208
+ UT_ASSERTeq (ret, 0 );
209
+
210
+ int numa_node_number = numa_node_of_cpu (cpu);
211
+
212
+ // This pointer must point to an initialized value before retrieving a number of
213
+ // the numa node that the pointer was allocated on (calling get_mempolicy).
214
+ memset (ptr, 0xFF , alloc_size);
215
+ int retrieved_numa_node_number = retrieve_numa_node_number (ptr);
216
+ ASSERT_EQ (retrieved_numa_node_number, numa_node_number);
217
+ CPU_FREE (mask);
218
+ }
219
+
220
+ // Test for allocation on numa node with local mode enabled. The memory is
221
+ // allocated on the node of the CPU that triggered the allocation.
222
+ TEST_F (testNuma, checkModeLocal) {
223
+ umf_os_memory_provider_params_t os_memory_provider_params =
224
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
225
+ os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
226
+ initOsProvider (os_memory_provider_params);
227
+
228
+ umf_result_t umf_result;
229
+ umf_result =
230
+ umfMemoryProviderAlloc (os_memory_provider, alloc_size, 0 , &ptr);
231
+ ASSERT_EQ (umf_result, UMF_RESULT_SUCCESS);
232
+ ASSERT_NE (ptr, nullptr );
233
+
234
+ int cpu = sched_getcpu ();
235
+ int numa_node_number = numa_node_of_cpu (cpu);
236
+
237
+ // This pointer must point to an initialized value before retrieving a number of
238
+ // the numa node that the pointer was allocated on (calling get_mempolicy).
239
+ memset (ptr, 0xFF , alloc_size);
240
+ int retrieved_numa_node_number = retrieve_numa_node_number (ptr);
241
+ ASSERT_EQ (retrieved_numa_node_number, numa_node_number);
242
+ }
243
+
244
+ // Test for allocation on numa node with default mode enabled.
245
+ // Since no policy is set by the set_mempolicy function, it should
246
+ // default to the system-wide default policy, which allocates pages
247
+ // on the node of the CPU that triggers the allocation.
248
+ TEST_F (testNuma, checkModeDefault) {
249
+ umf_os_memory_provider_params_t os_memory_provider_params =
250
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
251
+ initOsProvider (os_memory_provider_params);
252
+
253
+ umf_result_t umf_result;
254
+ umf_result =
255
+ umfMemoryProviderAlloc (os_memory_provider, alloc_size, 0 , &ptr);
256
+ ASSERT_EQ (umf_result, UMF_RESULT_SUCCESS);
257
+ ASSERT_NE (ptr, nullptr );
258
+
259
+ int cpu = sched_getcpu ();
260
+ int numa_node_number = numa_node_of_cpu (cpu);
261
+
262
+ // This pointer must point to an initialized value before retrieving a number of
263
+ // the numa node that the pointer was allocated on (calling get_mempolicy).
264
+ memset (ptr, 0xFF , alloc_size);
265
+ int retrieved_numa_node_number = retrieve_numa_node_number (ptr);
266
+ ASSERT_EQ (retrieved_numa_node_number, numa_node_number);
267
+ }
268
+
269
+ // Test for allocation on numa node with default mode enabled.
270
+ // Since the bind mode is set by setmempolicy, it should fall back to it.
271
// With the provider left in default mode, an MPOL_BIND policy installed via
// set_mempolicy() on the calling thread should govern the allocation.
TEST_F(testNuma, checkModeDefaultSetMempolicy) {
    int numa_node_number = get_available_numa_nodes_numbers()[0];
    numa_bitmask_setbit(nodemask, numa_node_number);
    umf_os_memory_provider_params_t params = UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
    initOsProvider(params);

    long ret = set_mempolicy(MPOL_BIND, nodemask->maskp, nodemask->size);
    ASSERT_EQ(ret, 0);

    umf_result_t res =
        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
    ASSERT_EQ(res, UMF_RESULT_SUCCESS);
    ASSERT_NE(ptr, nullptr);

    // Fault the pages in before querying their NUMA placement via
    // get_mempolicy().
    memset(ptr, 0xFF, alloc_size);
    ASSERT_EQ(retrieve_numa_node_number(ptr), numa_node_number);

    // Bug fix: restore the default policy so the MPOL_BIND installed above
    // does not leak into other tests executing in this process.
    ret = set_mempolicy(MPOL_DEFAULT, nullptr, 0);
    ASSERT_EQ(ret, 0);
}
293
+
294
+ // Test for allocations on numa nodes with interleave mode enabled.
295
+ // The page allocations are interleaved across the set of nodes specified in nodemask.
296
// Test for allocations on numa nodes with interleave mode enabled.
// The page allocations are interleaved across the set of nodes specified in
// nodemask, and the policy nodemask reported for the allocation must match
// the configured one.
TEST_F(testNuma, checkModeInterleave) {
    constexpr int pages_num = 1024;
    size_t page_size = sysconf(_SC_PAGE_SIZE);
    umf_os_memory_provider_params_t os_memory_provider_params =
        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
    // NOTE(review): maxnode is set to numa_max_node(); mbind()-style APIs
    // usually expect a bit count (highest node number + 1) - confirm the
    // provider's maxnode convention.
    os_memory_provider_params.maxnode = numa_max_node();
    set_all_available_nodemask_bits(nodemask);
    os_memory_provider_params.nodemask = nodemask->maskp;
    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
    initOsProvider(os_memory_provider_params);

    umf_result_t umf_result;
    umf_result = umfMemoryProviderAlloc(os_memory_provider,
                                        pages_num * page_size, 0, &ptr);
    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
    ASSERT_NE(ptr, nullptr);

    // This pointer must point to an initialized value before retrieving a
    // number of the numa node that the pointer was allocated on (calling
    // get_mempolicy).
    memset(ptr, 0xFF, pages_num * page_size);

    // Verify where each page was placed: interleaving is expected to cycle
    // through the available nodes, i.e. page i lands on
    // numa_nodes_numbers[i % node_count] (assumes the vector lists nodes in
    // the interleave order - TODO confirm against
    // get_available_numa_nodes_numbers()).
    std::vector<int> numa_nodes_numbers = get_available_numa_nodes_numbers();
    size_t index = 0;

    for (size_t i = 0; i < (size_t)pages_num; i++) {
        // Wrap around once every node has received a page.
        if (index == (size_t)numa_nodes_numbers.size()) {
            index = 0;
        }
        ASSERT_EQ(numa_nodes_numbers[index],
                  retrieve_numa_node_number((char *)ptr + page_size * i));
        index++;
    }

    // The policy nodemask reported for the allocation must equal the mask
    // the provider was configured with.
    bitmask *retrieved_nodemask = retrieve_nodemask(ptr);
    int ret = numa_bitmask_equal(retrieved_nodemask, nodemask);
    ASSERT_EQ(ret, 1);
    numa_bitmask_free(retrieved_nodemask);
}
335
+
336
+ // Test for allocations on a single numa node with interleave mode enabled.
337
+ TEST_F (testNuma, checkModeInterleaveSingleNode) {
338
+ constexpr int pages_num = 1024 ;
339
+ size_t page_size = sysconf (_SC_PAGE_SIZE);
340
+ umf_os_memory_provider_params_t os_memory_provider_params =
341
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
342
+ os_memory_provider_params.maxnode = numa_max_node ();
343
+ std::vector<int > numa_nodes_numbers = get_available_numa_nodes_numbers ();
344
+ numa_bitmask_setbit (nodemask, numa_nodes_numbers[0 ]);
345
+ os_memory_provider_params.nodemask = nodemask->maskp ;
346
+ os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
347
+ initOsProvider (os_memory_provider_params);
348
+
349
+ umf_result_t umf_result;
350
+ umf_result = umfMemoryProviderAlloc (os_memory_provider,
351
+ pages_num * page_size, 0 , &ptr);
352
+ ASSERT_EQ (umf_result, UMF_RESULT_SUCCESS);
353
+ ASSERT_NE (ptr, nullptr );
354
+
355
+ // This pointer must point to an initialized value before retrieving a number of
356
+ // the numa node that the pointer was allocated on (calling get_mempolicy).
357
+ memset (ptr, 0xFF , pages_num * page_size);
358
+
359
+ ASSERT_EQ (numa_nodes_numbers[0 ], retrieve_numa_node_number (ptr));
360
+ }
361
+
362
+ // Negative tests
363
+
364
+ // Test for allocation on numa node with local mode enabled when maxnode
365
+ // and nodemask are set. For the local mode the maxnode and nodemask must be an empty set.
366
+ TEST_F (testNuma, checkModeLocalIllegalArgSet) {
367
+ umf_os_memory_provider_params_t os_memory_provider_params =
368
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
369
+ os_memory_provider_params.maxnode = numa_max_node ();
370
+ set_all_available_nodemask_bits (nodemask);
371
+ os_memory_provider_params.nodemask = nodemask->maskp ;
372
+ os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
373
+ umf_result_t umf_result;
374
+ umf_result = umfMemoryProviderCreate (umfOsMemoryProviderOps (),
375
+ &os_memory_provider_params,
376
+ &os_memory_provider);
377
+ ASSERT_EQ (umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
378
+ ASSERT_EQ (os_memory_provider, nullptr );
379
+ }
380
+
381
+ // Test for allocation on numa node with default mode enabled when maxnode
382
+ // and nodemask are set. For the default mode the maxnode and nodemask must be an empty set.
383
+ TEST_F (testNuma, checkModeDefaultIllegalArgSet) {
384
+ umf_os_memory_provider_params_t os_memory_provider_params =
385
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
386
+ os_memory_provider_params.maxnode = numa_max_node ();
387
+ set_all_available_nodemask_bits (nodemask);
388
+ os_memory_provider_params.nodemask = nodemask->maskp ;
389
+ umf_result_t umf_result;
390
+ umf_result = umfMemoryProviderCreate (umfOsMemoryProviderOps (),
391
+ &os_memory_provider_params,
392
+ &os_memory_provider);
393
+ ASSERT_EQ (umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
394
+ ASSERT_EQ (os_memory_provider, nullptr );
395
+ }
396
+
397
+ // Test for allocation on numa node with bind mode enabled when maxnode
398
+ // and nodemask are unset. For the bind mode the maxnode and nodemask
399
+ // must be a non-empty set.
400
+ TEST_F (testNuma, checkModeBindIllegalArgSet) {
401
+ umf_os_memory_provider_params_t os_memory_provider_params =
402
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
403
+ os_memory_provider_params.numa_mode = UMF_NUMA_MODE_BIND;
404
+ umf_result_t umf_result;
405
+ umf_result = umfMemoryProviderCreate (umfOsMemoryProviderOps (),
406
+ &os_memory_provider_params,
407
+ &os_memory_provider);
408
+ ASSERT_EQ (umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
409
+ ASSERT_EQ (os_memory_provider, nullptr );
410
+ }
411
+
412
+ // Test for allocation on numa node with interleave mode enabled when maxnode
413
+ // and nodemask are unset. For the interleve mode the maxnode and nodemask
414
+ // must be a non-empty set.
415
+ TEST_F (testNuma, checkModeInterleaveIllegalArgSet) {
416
+ umf_os_memory_provider_params_t os_memory_provider_params =
417
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
418
+ os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
419
+ umf_result_t umf_result;
420
+ umf_result = umfMemoryProviderCreate (umfOsMemoryProviderOps (),
421
+ &os_memory_provider_params,
422
+ &os_memory_provider);
423
+ ASSERT_EQ (umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
424
+ ASSERT_EQ (os_memory_provider, nullptr );
425
+ }
0 commit comments