#include <numa.h>
#include <numaif.h>
+#include <sched.h>

+#include "test_helpers.h"

#include <umf/providers/provider_os_memory.h>

static umf_os_memory_provider_params_t UMF_OS_MEMORY_PROVIDER_PARAMS_TEST =
@@ -28,7 +30,17 @@ std::vector<int> get_available_numa_nodes_numbers() {
    return available_numa_nodes_numbers;
}

-struct testNumaNodes : public testing::TestWithParam<int> {
+void set_all_available_nodemask_bits(bitmask *nodemask) {
+    UT_ASSERTne(numa_available(), -1);
+    UT_ASSERTne(numa_all_nodes_ptr, nullptr);
+
+    numa_bitmask_clearall(nodemask);
+
+    // Set all available NUMA node numbers.
+    copy_bitmask_to_bitmask(numa_all_nodes_ptr, nodemask);
+}
+
+struct testNuma : testing::Test {
    void SetUp() override {
        if (numa_available() == -1) {
            GTEST_SKIP() << "Test skipped, NUMA not available";
@@ -56,10 +68,19 @@ struct testNumaNodes : public testing::TestWithParam<int> {
        int numa_node;
        int ret = get_mempolicy(&numa_node, nullptr, 0, addr,
                                MPOL_F_NODE | MPOL_F_ADDR);
-        EXPECT_EQ(ret, 0);
+        UT_ASSERTeq(ret, 0);
        return numa_node;
    }

+    struct bitmask *retrieve_nodemask(void *addr) {
+        struct bitmask *retrieved_nodemask = numa_allocate_nodemask();
+        UT_ASSERTne(retrieved_nodemask, nullptr);
+        int ret = get_mempolicy(nullptr, retrieved_nodemask->maskp,
+                                retrieved_nodemask->size, addr, MPOL_F_ADDR);
+        UT_ASSERTeq(ret, 0);
+        return retrieved_nodemask;
+    }
+
    void TearDown() override {
        umf_result_t umf_result;
        if (ptr) {
@@ -82,14 +103,16 @@ struct testNumaNodes : public testing::TestWithParam<int> {
    umf_memory_provider_handle_t os_memory_provider = nullptr;
};

+struct testNumaOnAllNodes : testNuma, testing::WithParamInterface<int> {};
+
INSTANTIATE_TEST_SUITE_P(
-    testNumaNodesAllocations, testNumaNodes,
+    testNumaNodesAllocations, testNumaOnAllNodes,
    ::testing::ValuesIn(get_available_numa_nodes_numbers()));

// Test for allocations on numa nodes. This test will be executed for all numa nodes
// available on the system. The available nodes are returned in a vector from the
// get_available_numa_nodes_numbers() function and passed to the test as parameters.
-TEST_P(testNumaNodes, checkNumaNodesAllocations) {
+TEST_P(testNumaOnAllNodes, checkNumaNodesAllocations) {
    int numa_node_number = GetParam();
    umf_os_memory_provider_params_t os_memory_provider_params =
        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
@@ -111,3 +134,234 @@ TEST_P(testNumaNodes, checkNumaNodesAllocations) {
    int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
    ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
}
+
+// Test for allocations on numa nodes with the preferred mode. It runs for all
+// available numa nodes obtained from the get_available_numa_nodes_numbers() function.
+TEST_P(testNumaOnAllNodes, checkModePreferred) {
+    int numa_node_number = GetParam();
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    os_memory_provider_params.maxnode = numa_node_number + 1;
+    numa_bitmask_setbit(nodemask, numa_node_number);
+    os_memory_provider_params.nodemask = nodemask->maskp;
+    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
+    initOsProvider(os_memory_provider_params);
+
+    umf_result_t umf_result;
+    umf_result =
+        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ptr, nullptr);
+
+    // The memory must be written to before calling get_mempolicy, so that
+    // the pages are actually allocated on a numa node.
+    memset(ptr, 0xFF, alloc_size);
+    int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+    ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+}
+
163
+ // Test for allocation on numa node with mode preferred and an empty nodeset.
164
+ // For the empty nodeset the memory is allocated on the node of the CPU that
165
+ // triggered the allocation.
166
+ TEST_F (testNuma, checkModePreferredEmptyNodeset) {
167
+ umf_os_memory_provider_params_t os_memory_provider_params =
168
+ UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
169
+ os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
170
+ initOsProvider (os_memory_provider_params);
171
+
172
+ umf_result_t umf_result;
173
+ umf_result =
174
+ umfMemoryProviderAlloc (os_memory_provider, alloc_size, 0 , &ptr);
175
+ ASSERT_EQ (umf_result, UMF_RESULT_SUCCESS);
176
+ ASSERT_NE (ptr, nullptr );
177
+
178
+ int cpu = sched_getcpu ();
179
+ int numa_node_number = numa_node_of_cpu (cpu);
180
+
181
+ // This pointer must point to an initialized value before retrieving a number of
182
+ // the numa node that the pointer was allocated on (calling get_mempolicy).
183
+ memset (ptr, 0xFF , alloc_size);
184
+ int retrieved_numa_node_number = retrieve_numa_node_number (ptr);
185
+ ASSERT_EQ (retrieved_numa_node_number, numa_node_number);
186
+ }
+
+// Test for allocation on a numa node with the local mode enabled. The memory is
+// allocated on the node of the CPU that triggered the allocation.
+TEST_F(testNuma, checkModeLocal) {
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
+    initOsProvider(os_memory_provider_params);
+
+    umf_result_t umf_result;
+    umf_result =
+        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ptr, nullptr);
+
+    int cpu = sched_getcpu();
+    int numa_node_number = numa_node_of_cpu(cpu);
+
+    // The memory must be written to before calling get_mempolicy, so that
+    // the pages are actually allocated on a numa node.
+    memset(ptr, 0xFF, alloc_size);
+    int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+    ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+}
+
+// Test for allocation on a numa node with the default mode enabled.
+// Since no policy is set by the set_mempolicy function, it should
+// default to the system-wide default policy, which allocates pages
+// on the node of the CPU that triggers the allocation.
+TEST_F(testNuma, checkModeDefault) {
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    initOsProvider(os_memory_provider_params);
+
+    umf_result_t umf_result;
+    umf_result =
+        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ptr, nullptr);
+
+    int cpu = sched_getcpu();
+    int numa_node_number = numa_node_of_cpu(cpu);
+
+    // The memory must be written to before calling get_mempolicy, so that
+    // the pages are actually allocated on a numa node.
+    memset(ptr, 0xFF, alloc_size);
+    int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+    ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+}
+
+// Test for allocation on a numa node with the default mode enabled.
+// Since the bind policy is set via set_mempolicy(), the allocation should follow it.
+TEST_F(testNuma, checkModeDefaultSetMempolicy) {
+    int numa_node_number = get_available_numa_nodes_numbers()[0];
+    numa_bitmask_setbit(nodemask, numa_node_number);
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    initOsProvider(os_memory_provider_params);
+
+    long ret = set_mempolicy(MPOL_BIND, nodemask->maskp, nodemask->size);
+    ASSERT_EQ(ret, 0);
+
+    umf_result_t umf_result;
+    umf_result =
+        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ptr, nullptr);
+
+    // The memory must be written to before calling get_mempolicy, so that
+    // the pages are actually allocated on a numa node.
+    memset(ptr, 0xFF, alloc_size);
+    int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
+    ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
+}
+
+// Test for allocations on numa nodes with the interleave mode enabled.
+// The page allocations are interleaved across the set of nodes specified in nodemask.
+TEST_F(testNuma, checkModeInterleave) {
+    int num_page = 1024;
+    size_t page_size = sysconf(_SC_PAGE_SIZE);
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    os_memory_provider_params.maxnode = numa_max_node();
+    set_all_available_nodemask_bits(nodemask);
+    os_memory_provider_params.nodemask = nodemask->maskp;
+    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
+    initOsProvider(os_memory_provider_params);
+
+    umf_result_t umf_result;
+    umf_result = umfMemoryProviderAlloc(os_memory_provider,
+                                        num_page * page_size, 0, &ptr);
+    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
+    ASSERT_NE(ptr, nullptr);
+
+    // The memory must be written to before calling get_mempolicy, so that
+    // the pages are actually allocated on the numa nodes.
+    memset(ptr, 0xFF, num_page * page_size);
+
+    // Check on which node each page was allocated (round-robin across the nodes).
+    std::vector<int> numa_nodes_numbers = get_available_numa_nodes_numbers();
+    size_t index = 0;
+
+    for (size_t i = 0; i < (size_t)num_page; i++) {
+        if (index == numa_nodes_numbers.size()) {
+            index = 0;
+        }
+        ASSERT_EQ(numa_nodes_numbers[index],
+                  retrieve_numa_node_number((char *)ptr + page_size * i));
+        index++;
+    }
+
+    bitmask *retrieved_nodemask = retrieve_nodemask(ptr);
+    int ret = numa_bitmask_equal(retrieved_nodemask, nodemask);
+    ASSERT_EQ(ret, 1);
+    numa_bitmask_free(retrieved_nodemask);
+}
+
+// Negative tests
+
+// Test for allocation on numa node with local mode enabled when maxnode
+// and nodemask are set. For the local mode the maxnode and nodemask must be an empty set.
+TEST_F(testNuma, checkModeLocalIllegalArgSet) {
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    os_memory_provider_params.maxnode = numa_max_node();
+    set_all_available_nodemask_bits(nodemask);
+    os_memory_provider_params.nodemask = nodemask->maskp;
+    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
+    umf_result_t umf_result;
+    umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
+                                         &os_memory_provider_params,
+                                         &os_memory_provider);
+    ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+    ASSERT_EQ(os_memory_provider, nullptr);
+}
+
+// Test for allocation on numa node with default mode enabled when maxnode
+// and nodemask are set. For the default mode the maxnode and nodemask must be an empty set.
+TEST_F(testNuma, checkModeDefaultIllegalArgSet) {
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    os_memory_provider_params.maxnode = numa_max_node();
+    set_all_available_nodemask_bits(nodemask);
+    os_memory_provider_params.nodemask = nodemask->maskp;
+    umf_result_t umf_result;
+    umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
+                                         &os_memory_provider_params,
+                                         &os_memory_provider);
+    ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+    ASSERT_EQ(os_memory_provider, nullptr);
+}
+
+// Test for allocation on numa node with bind mode enabled when maxnode
+// and nodemask are unset. For the bind mode the maxnode and nodemask
+// must be a non-empty set.
+TEST_F(testNuma, checkModeBindIllegalArgSet) {
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_BIND;
+    umf_result_t umf_result;
+    umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
+                                         &os_memory_provider_params,
+                                         &os_memory_provider);
+    ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+    ASSERT_EQ(os_memory_provider, nullptr);
+}
+
+// Test for allocation on numa node with interleave mode enabled when maxnode
+// and nodemask are unset. For the interleave mode the maxnode and nodemask
+// must be a non-empty set.
+TEST_F(testNuma, checkModeInterleaveIllegalArgSet) {
+    umf_os_memory_provider_params_t os_memory_provider_params =
+        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
+    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
+    umf_result_t umf_result;
+    umf_result = umfMemoryProviderCreate(umfOsMemoryProviderOps(),
+                                         &os_memory_provider_params,
+                                         &os_memory_provider);
+    ASSERT_EQ(umf_result, UMF_RESULT_ERROR_INVALID_ARGUMENT);
+    ASSERT_EQ(os_memory_provider, nullptr);
+}