|
6 | 6 |
|
7 | 7 | #include <numa.h>
|
8 | 8 | #include <numaif.h>
|
| 9 | +#include <sched.h> |
9 | 10 |
|
10 | 11 | #include <umf/providers/provider_os_memory.h>
|
11 | 12 |
|
@@ -60,6 +61,30 @@ struct testNumaNodes : public testing::TestWithParam<int> {
|
60 | 61 | return numa_node;
|
61 | 62 | }
|
62 | 63 |
|
| 64 | + long unsigned int retrieve_nodemask(void *addr) { |
| 65 | + struct bitmask *retrieved_nodemask = numa_allocate_nodemask(); |
| 66 | + EXPECT_NE(nodemask, nullptr); |
| 67 | + int ret = get_mempolicy(nullptr, retrieved_nodemask->maskp, |
| 68 | + numa_max_possible_node(), addr, MPOL_F_ADDR); |
| 69 | + EXPECT_EQ(ret, 0); |
| 70 | + long unsigned int retrieved_nodemask_value = |
| 71 | + *(retrieved_nodemask->maskp); |
| 72 | + numa_bitmask_free(retrieved_nodemask); |
| 73 | + return retrieved_nodemask_value; |
| 74 | + } |
| 75 | + |
| 76 | + void set_all_available_nodemask_bits() { |
| 77 | + ASSERT_NE(numa_available(), -1); |
| 78 | + ASSERT_NE(numa_all_nodes_ptr, nullptr); |
| 79 | + |
| 80 | + // Set all available NUMA nodes numbers. |
| 81 | + for (size_t i = 0; i < (size_t)numa_max_node() + 1; ++i) { |
| 82 | + if (numa_bitmask_isbitset(numa_all_nodes_ptr, i) == 1) { |
| 83 | + numa_bitmask_setbit(nodemask, i); |
| 84 | + } |
| 85 | + } |
| 86 | + } |
| 87 | + |
63 | 88 | void TearDown() override {
|
64 | 89 | umf_result_t umf_result;
|
65 | 90 | if (ptr) {
|
@@ -111,3 +136,102 @@ TEST_P(testNumaNodes, checkNumaNodesAllocations) {
|
111 | 136 | int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
|
112 | 137 | ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
|
113 | 138 | }
|
| 139 | + |
// Test for allocations on NUMA nodes with the preferred mode. It runs once
// for every NUMA node obtained from get_available_numa_nodes_numbers().
TEST_P(testNumaNodes, checkModePreferred) {
    int numa_node_number = GetParam();

    umf_os_memory_provider_params_t os_memory_provider_params =
        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
    numa_bitmask_setbit(nodemask, numa_node_number);
    os_memory_provider_params.nodemask = nodemask->maskp;
    os_memory_provider_params.maxnode = numa_node_number + 1;
    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_PREFERRED;
    initOsProvider(os_memory_provider_params);

    umf_result_t umf_result =
        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
    ASSERT_NE(ptr, nullptr);

    // The page must be touched before get_mempolicy can report the node it
    // was actually allocated on.
    memset(ptr, 0xFF, alloc_size);
    ASSERT_EQ(retrieve_numa_node_number(ptr), numa_node_number);
}
| 164 | + |
// Test for allocation with local mode enabled. The memory is expected to be
// allocated on the NUMA node of the CPU that triggered the allocation.
// This test is run multiple times (once per NUMA node on the system) even
// though the parameter is unused, just to make sure it works; possible
// TODO: change it to TEST_F.
TEST_P(testNumaNodes, checkModeLocal) {
    int cpu = sched_getcpu();
    // Fail loudly here instead of with a confusing -1 mismatch at the end.
    ASSERT_GE(cpu, 0); // sched_getcpu() returns -1 on error
    int numa_node_number = numa_node_of_cpu(cpu);
    ASSERT_GE(numa_node_number, 0); // -1 means 'cpu' is invalid
    // NOTE(review): the thread may migrate to another CPU between
    // sched_getcpu() and the allocation below; consider pinning with
    // sched_setaffinity if this ever flakes.

    umf_os_memory_provider_params_t os_memory_provider_params =
        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_LOCAL;
    initOsProvider(os_memory_provider_params);

    umf_result_t umf_result;
    umf_result =
        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
    ASSERT_NE(ptr, nullptr);

    // This pointer must point to an initialized value before retrieving a
    // number of the numa node that the pointer was allocated on (calling
    // get_mempolicy).
    memset(ptr, 0xFF, alloc_size);
    int retrieved_numa_node_number = retrieve_numa_node_number(ptr);
    ASSERT_EQ(retrieved_numa_node_number, numa_node_number);
}
| 190 | + |
// Test for allocation with default mode enabled. Since no policy is set via
// set_mempolicy, the system-wide default policy applies, which allocates
// pages on the node of the CPU that triggers the allocation.
TEST_P(testNumaNodes, checkModeDefault) {
    int cpu = sched_getcpu();
    // NOTE(review): -1 error returns from sched_getcpu/numa_node_of_cpu are
    // not checked here; the final ASSERT would then fail — confirm intent.
    int numa_node_number = numa_node_of_cpu(cpu);

    umf_os_memory_provider_params_t os_memory_provider_params =
        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
    initOsProvider(os_memory_provider_params);

    umf_result_t umf_result =
        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
    ASSERT_NE(ptr, nullptr);

    // Touch the page so get_mempolicy can report where it really landed.
    memset(ptr, 0xFF, alloc_size);
    ASSERT_EQ(retrieve_numa_node_number(ptr), numa_node_number);
}
| 214 | + |
// Test for allocations with interleave mode enabled. Page allocations are
// interleaved across the set of nodes specified in the nodemask.
TEST_P(testNumaNodes, checkModeInterleave) {
    umf_os_memory_provider_params_t os_memory_provider_params =
        UMF_OS_MEMORY_PROVIDER_PARAMS_TEST;
    set_all_available_nodemask_bits();
    os_memory_provider_params.nodemask = nodemask->maskp;
    os_memory_provider_params.maxnode = numa_max_node() + 1;
    os_memory_provider_params.numa_mode = UMF_NUMA_MODE_INTERLEAVE;
    initOsProvider(os_memory_provider_params);

    umf_result_t umf_result =
        umfMemoryProviderAlloc(os_memory_provider, alloc_size, 0, &ptr);
    ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
    ASSERT_NE(ptr, nullptr);

    // Touch the page so get_mempolicy can report the effective policy.
    memset(ptr, 0xFF, alloc_size);
    // NOTE(review): only the first word of the mask is compared, so nodes
    // beyond 63 would not be covered — fine on typical systems, but verify.
    long unsigned int retrieved_nodemask_value = retrieve_nodemask(ptr);
    ASSERT_EQ(retrieved_nodemask_value, *(nodemask->maskp));
}
0 commit comments