#include "memspace_internal.h"
#include "test_helpers.h"

-#include <numa.h>
-#include <numaif.h>
+#include <hwloc.h>
+#include <thread>
#include <umf/memspace.h>

using umf_test::test;

-TEST_F(numaNodesTest, memspaceGet) {
-    umf_memspace_handle_t hMemspace = umfMemspaceHighestBandwidthGet();
-    UT_ASSERTne(hMemspace, nullptr);
+// In HWLOC v2.3.0, the 'hwloc_location_type_e' enum is defined inside the
+// 'hwloc_location' struct. In newer versions, this enum is defined globally.
+// To prevent compile errors in C++ tests related to this scope change,
+// 'hwloc_location_type_e' has been aliased.
+using hwloc_location_type_alias = decltype(hwloc_location::type);
+
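+// Check whether hwloc can report the HWLOC_MEMATTR_ID_BANDWIDTH attribute for
+// the given NUMA node. The fixtures below use this to skip the tests on
+// systems that expose no memory bandwidth information.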
+static bool canQueryBandwidth(size_t nodeId) {
+    hwloc_topology_t topology = nullptr;
+    int ret = hwloc_topology_init(&topology);
+    UT_ASSERTeq(ret, 0);
+    ret = hwloc_topology_load(topology);
+    UT_ASSERTeq(ret, 0);
+
+    hwloc_obj_t numaNode =
+        hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, nodeId);
+    UT_ASSERTne(numaNode, nullptr);
+
+    // Set up the initiator structure.
+    struct hwloc_location initiator;
+    initiator.location.cpuset = numaNode->cpuset;
+    initiator.type = hwloc_location_type_alias::HWLOC_LOCATION_TYPE_CPUSET;
+
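+    // Query the bandwidth attribute for this node, with the node's own CPUs
+    // as the initiator; a nonzero return means hwloc exposes no bandwidth
+    // information (e.g. no HMAT) for it.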
+    hwloc_uint64_t value = 0;
+    ret = hwloc_memattr_get_value(topology, HWLOC_MEMATTR_ID_BANDWIDTH,
+                                  numaNode, &initiator, 0, &value);
+
+    hwloc_topology_destroy(topology);
+    return (ret == 0);
+}
+
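+// Fixture providing the highest-bandwidth memspace; skips the test when the
+// bandwidth of the first NUMA node cannot be queried.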
+struct memspaceHighestBandwidthTest : ::numaNodesTest {
+    void SetUp() override {
+        ::numaNodesTest::SetUp();
+
+        if (!canQueryBandwidth(nodeIds.front())) {
+            GTEST_SKIP();
+        }
+
+        hMemspace = umfMemspaceHighestBandwidthGet();
+        ASSERT_NE(hMemspace, nullptr);
+    }
+
+    umf_memspace_handle_t hMemspace = nullptr;
+};
+
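+// Extends the fixture above with a memory provider created from the
+// highest-bandwidth memspace.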
+struct memspaceHighestBandwidthProviderTest : ::memspaceHighestBandwidthTest {
+    void SetUp() override {
+        ::memspaceHighestBandwidthTest::SetUp();
+
+        if (!canQueryBandwidth(nodeIds.front())) {
+            GTEST_SKIP();
+        }
+
+        umf_result_t ret =
+            umfMemoryProviderCreateFromMemspace(hMemspace, nullptr, &hProvider);
+        ASSERT_EQ(ret, UMF_RESULT_SUCCESS);
+        ASSERT_NE(hProvider, nullptr);
+    }
+
+    void TearDown() override {
+        ::memspaceHighestBandwidthTest::TearDown();
+
+        if (hProvider) {
+            umfMemoryProviderDestroy(hProvider);
+        }
+    }
+
+    umf_memory_provider_handle_t hProvider = nullptr;
+};
+
+TEST_F(memspaceHighestBandwidthTest, providerFromMemspace) {
+    umf_memory_provider_handle_t hProvider = nullptr;
+    umf_result_t ret =
+        umfMemoryProviderCreateFromMemspace(hMemspace, nullptr, &hProvider);
+    UT_ASSERTeq(ret, UMF_RESULT_SUCCESS);
+    UT_ASSERTne(hProvider, nullptr);
+
+    umfMemoryProviderDestroy(hProvider);
+}
+
+TEST_F(memspaceHighestBandwidthProviderTest, allocFree) {
+    void *ptr = nullptr;
+    size_t size = SIZE_4K;
+    size_t alignment = 0;
+
+    umf_result_t ret = umfMemoryProviderAlloc(hProvider, size, alignment, &ptr);
+    UT_ASSERTeq(ret, UMF_RESULT_SUCCESS);
+    UT_ASSERTne(ptr, nullptr);
+
+    // Access the allocation, so that all the pages associated with it are
+    // allocated on some NUMA node.
+    memset(ptr, 0xFF, size);
+
+    ret = umfMemoryProviderFree(hProvider, ptr, size);
+    UT_ASSERTeq(ret, UMF_RESULT_SUCCESS);
+}
+
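+// Collect the IDs of all CPUs available to this process, as seen by libnuma.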
+static std::vector<int> getAllCpus() {
+    std::vector<int> allCpus;
+    for (int i = 0; i < numa_num_possible_cpus(); ++i) {
+        if (numa_bitmask_isbitset(numa_all_cpus_ptr, i)) {
+            allCpus.push_back(i);
+        }
+    }
+
+    return allCpus;
+}
+
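+// Upper bound on the number of NUMA nodes returned by
+// hwloc_get_local_numanode_objs() below.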
+#define MAX_NODES 512
+
+TEST_F(memspaceHighestBandwidthProviderTest, allocLocalMt) {
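+    // Worker body: pin the calling thread to the given CPU, allocate from the
+    // provider, and check which NUMA node backs the allocation.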
+    auto pinAllocValidate = [&](umf_memory_provider_handle_t hProvider,
+                                int cpu) {
+        hwloc_topology_t topology = nullptr;
+        UT_ASSERTeq(hwloc_topology_init(&topology), 0);
+        UT_ASSERTeq(hwloc_topology_load(topology), 0);
+
+        // Pin current thread to the provided CPU.
+        hwloc_cpuset_t pinCpuset = hwloc_bitmap_alloc();
+        UT_ASSERTeq(hwloc_bitmap_set(pinCpuset, cpu), 0);
+        UT_ASSERTeq(
+            hwloc_set_cpubind(topology, pinCpuset, HWLOC_CPUBIND_THREAD), 0);
+
+        // Confirm that the thread is pinned to the provided CPU.
+        hwloc_cpuset_t curCpuset = hwloc_bitmap_alloc();
+        UT_ASSERTeq(
+            hwloc_get_cpubind(topology, curCpuset, HWLOC_CPUBIND_THREAD), 0);
+        UT_ASSERT(hwloc_bitmap_isequal(curCpuset, pinCpuset));
+        hwloc_bitmap_free(curCpuset);
+        hwloc_bitmap_free(pinCpuset);
+
+        // Allocate some memory.
+        const size_t size = SIZE_4K;
+        const size_t alignment = 0;
+        void *ptr = nullptr;
+
+        umf_result_t ret =
+            umfMemoryProviderAlloc(hProvider, size, alignment, &ptr);
+        UT_ASSERTeq(ret, UMF_RESULT_SUCCESS);
+        UT_ASSERTne(ptr, nullptr);
+
+        // Access the allocation, so that all the pages associated with it are
+        // allocated on some NUMA node.
+        memset(ptr, 0xFF, size);
+
+        // Get the NUMA node responsible for this allocation.
+        int mode = -1;
+        std::vector<size_t> boundNodeIds;
+        size_t allocNodeId = SIZE_MAX;
+        getAllocationPolicy(ptr, maxNodeId, mode, boundNodeIds, allocNodeId);
+
+        // Get the hwloc object for the NUMA node that backs the allocation.
+        hwloc_obj_t allocNodeObj =
+            hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, allocNodeId);
+
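+        // Query the NUMA nodes that hwloc considers local to that object.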
+        unsigned nNodes = MAX_NODES;
+        std::vector<hwloc_obj_t> localNodes(MAX_NODES);
+        hwloc_location loc;
+        loc.location.object = allocNodeObj;
+        loc.type = hwloc_location_type_alias::HWLOC_LOCATION_TYPE_OBJECT;
+        UT_ASSERTeq(hwloc_get_local_numanode_objs(topology, &loc, &nNodes,
+                                                   localNodes.data(), 0),
+                    0);
+        UT_ASSERT(nNodes <= MAX_NODES);
+
+        // Confirm that the allocation from this thread was made to a local
+        // NUMA node.
+        UT_ASSERT(std::any_of(localNodes.begin(), localNodes.end(),
+                              [&allocNodeObj](hwloc_obj_t node) {
+                                  return node == allocNodeObj;
+                              }));
+
+        ret = umfMemoryProviderFree(hProvider, ptr, size);
+        UT_ASSERTeq(ret, UMF_RESULT_SUCCESS);
+
+        hwloc_topology_destroy(topology);
+    };
+
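+    // Run one pinned worker per available CPU, all concurrently.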
+    const auto cpus = getAllCpus();
+    std::vector<std::thread> threads;
+    for (auto cpu : cpus) {
+        threads.emplace_back(pinAllocValidate, hProvider, cpu);
+    }
+
+    for (auto &thread : threads) {
+        thread.join();
+    }
}