@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
 #include "base.hpp"
-#include "numa_helpers.h"
+#include "numa_helpers.hpp"
 #include "test_helpers.h"
 
 #include <algorithm>
@@ -122,14 +122,10 @@ struct testNuma : testing::Test {
 };
 
 struct testNumaOnEachNode : testNuma, testing::WithParamInterface<unsigned> {};
-struct testNumaOnEachCpu : testNuma, testing::WithParamInterface<int> {};
 
 INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocations, testNumaOnEachNode,
                          ::testing::ValuesIn(get_available_numa_nodes()));
 
-INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocationsAllCpus, testNumaOnEachCpu,
-                         ::testing::ValuesIn(get_available_cpus()));
-
 // Test for allocations on numa nodes. It will be executed on each of
 // the available numa nodes.
 TEST_P(testNumaOnEachNode, checkNumaNodesAllocations) {
@@ -152,8 +148,7 @@ TEST_P(testNumaOnEachNode, checkNumaNodesAllocations) {
 
     // 'ptr' must point to an initialized value before retrieving its numa node
     memset(ptr, 0xFF, alloc_size);
-    int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
-    EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
+    EXPECT_NODE_EQ(ptr, numa_node_number);
 }
 
 // Test for allocations on numa nodes with mode preferred. It will be executed
@@ -177,8 +172,7 @@ TEST_P(testNumaOnEachNode, checkModePreferred) {
 
     // 'ptr' must point to an initialized value before retrieving its numa node
    memset(ptr, 0xFF, alloc_size);
-    int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
-    EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
+    EXPECT_NODE_EQ(ptr, numa_node_number);
 }
 
 // Test for allocation on numa node with default mode enabled.
@@ -202,8 +196,7 @@ TEST_P(testNumaOnEachNode, checkModeDefaultSetMempolicy) {
 
     // 'ptr' must point to an initialized value before retrieving its numa node
     memset(ptr, 0xFF, alloc_size);
-    int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
-    EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
+    EXPECT_NODE_EQ(ptr, numa_node_number);
 }
 
 // Test for allocations on a single numa node with interleave mode enabled.
@@ -229,10 +222,14 @@ TEST_P(testNumaOnEachNode, checkModeInterleaveSingleNode) {
 
     // 'ptr' must point to an initialized value before retrieving its numa node
     memset(ptr, 0xFF, pages_num * page_size);
-    int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
-    EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
+    EXPECT_NODE_EQ(ptr, numa_node_number);
 }
 
+struct testNumaOnEachCpu : testNuma, testing::WithParamInterface<int> {};
+
+INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocationsAllCpus, testNumaOnEachCpu,
+                         ::testing::ValuesIn(get_available_cpus()));
+
 // Test for allocation on numa node with mode preferred and an empty nodeset.
 // For the empty nodeset the memory is allocated on the node of the CPU that
 // triggered the allocation. It will be executed on each available CPU.
@@ -269,8 +266,7 @@ TEST_P(testNumaOnEachCpu, checkModePreferredEmptyNodeset) {
 
     // 'ptr' must point to an initialized value before retrieving its numa node
     memset(ptr, 0xFF, alloc_size);
-    int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
-    EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
+    EXPECT_NODE_EQ(ptr, numa_node_number);
 }
 
 // Test for allocation on numa node with local mode enabled. The memory is
@@ -307,8 +303,7 @@ TEST_P(testNumaOnEachCpu, checkModeLocal) {
 
     // 'ptr' must point to an initialized value before retrieving its numa node
     memset(ptr, 0xFF, alloc_size);
-    int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
-    EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
+    EXPECT_NODE_EQ(ptr, numa_node_number);
 }
 
 // Test for allocation on numa node with default mode enabled.
@@ -332,8 +327,7 @@ TEST_F(testNuma, checkModeDefault) {
 
     // 'ptr' must point to an initialized value before retrieving its numa node
     memset(ptr, 0xFF, alloc_size);
-    int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
-    EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
+    EXPECT_NODE_EQ(ptr, numa_node_number);
 }
 
 // Test for allocations on numa nodes with interleave mode enabled.
@@ -363,11 +357,11 @@ TEST_F(testNuma, checkModeInterleave) {
 
     // Test where each page will be allocated.
    // Get the first numa node for ptr; Each next page is expected to be on next nodes.
-    size_t index = getNumaNodeByPtr((char *)ptr);
+    int index = 0;
+    ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr(ptr, &index));
     for (size_t i = 1; i < (size_t)pages_num; i++) {
         index = (index + 1) % numa_nodes.size();
-        ASSERT_EQ(numa_nodes[index],
-                  getNumaNodeByPtr((char *)ptr + page_size * i));
+        EXPECT_NODE_EQ((char *)ptr + page_size * i, numa_nodes[index]);
     }
 
     bitmask *retrieved_nodemask = retrieve_nodemask(ptr);
@@ -407,13 +401,12 @@ TEST_F(testNuma, checkModeInterleaveCustomPartSize) {
     memset(ptr, 0xFF, size);
     // Test where each page will be allocated.
     // Get the first numa node for ptr; Each next part is expected to be on next nodes.
-    size_t index = getNumaNodeByPtr((char *)ptr);
+    int index = 0;
+    ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr(ptr, &index));
     for (size_t i = 0; i < (size_t)part_num; i++) {
         for (size_t j = 0; j < part_size; j += page_size) {
-            EXPECT_EQ(numa_nodes[index],
-                      getNumaNodeByPtr((char *)ptr + part_size * i + j))
-                << "for ptr " << ptr << " + " << part_size << " * " << i
-                << " + " << j;
+            ASSERT_NODE_EQ(((char *)ptr + page_size * i + j),
+                           numa_nodes[index]);
         }
         index = (index + 1) % numa_nodes.size();
     }
@@ -425,7 +418,7 @@ TEST_F(testNuma, checkModeInterleaveCustomPartSize) {
     ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
     ASSERT_NE(ptr, nullptr);
     memset(ptr, 0xFF, size);
-    EXPECT_EQ(numa_nodes[index], getNumaNodeByPtr(ptr));
+    EXPECT_NODE_EQ(ptr, numa_nodes[index]);
     umfMemoryProviderFree(os_memory_provider, ptr, size);
 }
 
@@ -627,7 +620,10 @@ TEST_F(testNuma, checkModeBindOnAllNodes) {
 
     // 'ptr' must point to an initialized value before retrieving its numa node
     memset(ptr, 0xFF, alloc_size);
-    unsigned retrieved_numa_node_number = (unsigned)getNumaNodeByPtr(ptr);
+
+    int node;
+    ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr(ptr, &node));
+    unsigned retrieved_numa_node_number = (unsigned)node;
 
     int read_cpu = sched_getcpu();
     int read_numa_node = numa_node_of_cpu(read_cpu);
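Note on the helper change used throughout this diff: the old pattern of calling getNumaNodeByPtr(ptr) and comparing its return value with EXPECT_EQ is replaced by the EXPECT_NODE_EQ/ASSERT_NODE_EQ checks and an out-parameter getNumaNodeByPtr(ptr, &node) that reports lookup failures through GoogleTest, which is why call sites wrap it in ASSERT_NO_FATAL_FAILURE. The real definitions live in numa_helpers.hpp and are not shown here; the sketch below is only one plausible way such helpers could look. The move_pages()-based lookup and the exact macro bodies are assumptions, not the library's actual implementation.

// A minimal sketch, assuming a move_pages()-based node lookup; the actual
// helpers in numa_helpers.hpp may differ.
#include <gtest/gtest.h>
#include <numaif.h> // move_pages(); link with -lnuma

// Writes the numa node backing 'ptr' into *node. Uses ASSERT_* internally, so
// a failed lookup becomes a fatal GoogleTest failure that callers can detect
// with ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr(ptr, &node)).
static void getNumaNodeByPtr(void *ptr, int *node) {
    int status = -1;
    long ret = move_pages(0 /* self */, 1, &ptr, nullptr, &status, 0);
    ASSERT_EQ(ret, 0) << "move_pages() failed for address " << ptr;
    ASSERT_GE(status, 0) << "no numa node found for address " << ptr;
    *node = status;
}

// Non-fatal check that 'ptr' resides on numa node 'node'.
#define EXPECT_NODE_EQ(ptr, node)                                              \
    do {                                                                       \
        int actual_node = -1;                                                  \
        ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr((ptr), &actual_node));        \
        EXPECT_EQ(actual_node, (node));                                        \
    } while (0)

// Fatal variant of the same check.
#define ASSERT_NODE_EQ(ptr, node)                                              \
    do {                                                                       \
        int actual_node = -1;                                                  \
        ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr((ptr), &actual_node));        \
        ASSERT_EQ(actual_node, (node));                                        \
    } while (0)

Routing the lookup failure through GoogleTest instead of a return code keeps each call site to a single line while still aborting the test early when the node of a pointer cannot be determined.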