|
| 1 | +// Copyright (C) 2023 Intel Corporation |
| 2 | +// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT. |
| 3 | +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 4 | +// This file contains tests for UMF provider API |
| 5 | + |
#include <cstdint>
#include <iostream>
#include <random>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "base.hpp"

#include <umf/memory_pool.h>
#include <umf/memory_provider.h>
#include <umf/pools/pool_tbb.h>
#include <umf/providers/provider_os_memory.h>
// #include "provider_os_memory_internal.h"
| 19 | +using umf_test::test; |
| 20 | + |
// Page size assumed by these tests (4 KiB); not referenced in this chunk —
// presumably used by other tests in the suite. TODO(review): confirm.
constexpr int PAGE_SIZE = 4 * 1024;

// OS memory provider configuration shared by every test below.
// NOTE(review): non-const because it is passed by address to
// umfMemoryProviderCreate — assumed the API does not mutate it; confirm.
static umf_os_memory_provider_params_t UMF_OS_MEMORY_PROVIDER_PARAMS = {
    /* .protection = */ UMF_PROTECTION_READ | UMF_PROTECTION_WRITE,
    /* .visibility = */ UMF_VISIBILITY_PRIVATE,

    // NUMA config — all defaults: no node mask, default allocation policy
    /* .nodemask = */ NULL,
    /* .maxnode = */ 0,
    /* .numa_mode = */ UMF_NUMA_MODE_DEFAULT,
    /* .numa_flags = */ 0,

    // others
    /* .traces = */ 1,
};

// Size helpers used throughout the tests.
const size_t KB = 1024;
const size_t MB = 1024 * KB;
// Size of the pool's initial buffer (20 MB) — the "basic" test allocates
// exactly this much to occupy the whole initial block at once.
const size_t init_buffer_size = 20 * MB;
| 40 | + |
| 41 | +TEST_F(test, pool_tbb_MallocPool_basic) { |
| 42 | + umf_memory_provider_handle_t os_memory_provider; |
| 43 | + umfMemoryProviderCreate(&UMF_OS_MEMORY_PROVIDER_OPS, |
| 44 | + &UMF_OS_MEMORY_PROVIDER_PARAMS, |
| 45 | + &os_memory_provider); |
| 46 | + ASSERT_NE(os_memory_provider, nullptr); |
| 47 | + |
| 48 | + umf_memory_pool_handle_t pool; |
| 49 | + umfPoolCreate(&UMF_TBB_POOL_OPS, os_memory_provider, NULL, &pool); |
| 50 | + ASSERT_NE(pool, nullptr); |
| 51 | + |
| 52 | + // test |
| 53 | + |
| 54 | + umf_memory_provider_handle_t prov = NULL; |
| 55 | + umfPoolGetMemoryProvider(pool, &prov); |
| 56 | + ASSERT_NE(prov, nullptr); |
| 57 | + |
| 58 | + // alloc 2x 2MB |
| 59 | + void *p1 = umfPoolMalloc(pool, 2 * MB); |
| 60 | + void *p2 = umfPoolMalloc(pool, 2 * MB); |
| 61 | + ASSERT_NE(p1, p2); |
| 62 | + |
| 63 | + // swap pointers to get p1 < p2 |
| 64 | + if (p1 > p2) { |
| 65 | + std::swap(p1, p2); |
| 66 | + } |
| 67 | + |
| 68 | + // free + alloc first block |
| 69 | + // the block should be reused |
| 70 | + // currently there is no purging, so the alloc size shouldn't change |
| 71 | + // there should be no block merging between used and not-used blocks |
| 72 | + umf_result_t res = umfPoolFree(pool, p1); |
| 73 | + ASSERT_EQ(res, UMF_RESULT_SUCCESS); |
| 74 | + p1 = umfPoolMalloc(pool, 2 * MB); |
| 75 | + |
| 76 | + // free all allocs |
| 77 | + // overall alloc size shouldn't change |
| 78 | + // block p2 should merge with the prev free block p1 |
| 79 | + // and the remaining init block |
| 80 | + res = umfPoolFree(pool, p1); |
| 81 | + ASSERT_EQ(res, UMF_RESULT_SUCCESS); |
| 82 | + res = umfPoolFree(pool, p2); |
| 83 | + ASSERT_EQ(res, UMF_RESULT_SUCCESS); |
| 84 | + |
| 85 | + // alloc whole buffer |
| 86 | + // after this, there should be one single block |
| 87 | + p1 = umfPoolMalloc(pool, init_buffer_size); |
| 88 | + |
| 89 | + // free all memory |
| 90 | + // alloc 2 MB block - the init block should be split |
| 91 | + res = umfPoolFree(pool, p1); |
| 92 | + p1 = umfPoolMalloc(pool, 2 * MB); |
| 93 | + |
| 94 | + // alloc additional 2 MB |
| 95 | + // the non-used block should be used |
| 96 | + p2 = umfPoolMalloc(pool, 2 * MB); |
| 97 | + ASSERT_NE(p1, p2); |
| 98 | + |
| 99 | + // make sure that p1 < p2 |
| 100 | + if (p1 > p2) { |
| 101 | + std::swap(p1, p2); |
| 102 | + } |
| 103 | + |
| 104 | + // free blocks in order: p2, p1 |
| 105 | + // block p1 should merge with the next block p2 |
| 106 | + // swap pointers to get p1 < p2 |
| 107 | + umfPoolFree(pool, p2); |
| 108 | + umfPoolFree(pool, p1); |
| 109 | + |
| 110 | + // alloc 10x 2 MB - this should occupy all allocated memory |
| 111 | + constexpr int allocs_size = 10; |
| 112 | + void *allocs[allocs_size] = {0}; |
| 113 | + for (int i = 0; i < allocs_size; i++) { |
| 114 | + allocs[i] = umfPoolMalloc(pool, 2 * MB); |
| 115 | + ASSERT_NE(allocs[i], nullptr); |
| 116 | + } |
| 117 | + // there should be no block with the free memory |
| 118 | + |
| 119 | + // free all memory |
| 120 | + for (int i = 0; i < allocs_size; i++) { |
| 121 | + res = umfPoolFree(pool, allocs[i]); |
| 122 | + ASSERT_EQ(res, UMF_RESULT_SUCCESS); |
| 123 | + } |
| 124 | + |
| 125 | + umfPoolDestroy(pool); |
| 126 | + umfMemoryProviderDestroy(os_memory_provider); |
| 127 | +} |
| 128 | + |
| 129 | +TEST_F(test, pool_tbb_MallocPool_simple1) { |
| 130 | + umf_memory_provider_handle_t os_memory_provider; |
| 131 | + umfMemoryProviderCreate(&UMF_OS_MEMORY_PROVIDER_OPS, |
| 132 | + &UMF_OS_MEMORY_PROVIDER_PARAMS, |
| 133 | + &os_memory_provider); |
| 134 | + ASSERT_NE(os_memory_provider, nullptr); |
| 135 | + |
| 136 | + umf_memory_pool_handle_t pool; |
| 137 | + umfPoolCreate(&UMF_TBB_POOL_OPS, os_memory_provider, NULL, &pool); |
| 138 | + ASSERT_NE(pool, nullptr); |
| 139 | + |
| 140 | + umf_memory_provider_handle_t prov = NULL; |
| 141 | + umfPoolGetMemoryProvider(pool, &prov); |
| 142 | + ASSERT_NE(prov, nullptr); |
| 143 | + |
| 144 | + // test 1 |
| 145 | + |
| 146 | + size_t s1 = 74659 * KB; |
| 147 | + size_t s2 = 8206 * KB; |
| 148 | + |
| 149 | + size_t max_alloc_size = 0; |
| 150 | + |
| 151 | + // s1 |
| 152 | + for (int j = 0; j < 2; j++) { |
| 153 | + void *t[6] = {0}; |
| 154 | + for (int i = 0; i < 6; i++) { |
| 155 | + t[i] = umfPoolMalloc(pool, s1); |
| 156 | + ASSERT_NE(t[i], nullptr); |
| 157 | + } |
| 158 | + |
| 159 | + if (max_alloc_size == 0) { |
| 160 | + } |
| 161 | + |
| 162 | + for (int i = 0; i < 6; i++) { |
| 163 | + umf_result_t res = umfPoolFree(pool, t[i]); |
| 164 | + ASSERT_EQ(res, UMF_RESULT_SUCCESS); |
| 165 | + } |
| 166 | + } |
| 167 | + |
| 168 | + // s2 |
| 169 | + for (int j = 0; j < 2; j++) { |
| 170 | + void *t[6] = {0}; |
| 171 | + for (int i = 0; i < 6; i++) { |
| 172 | + t[i] = umfPoolMalloc(pool, s2); |
| 173 | + ASSERT_NE(t[i], nullptr); |
| 174 | + } |
| 175 | + |
| 176 | + for (int i = 0; i < 6; i++) { |
| 177 | + umf_result_t res = umfPoolFree(pool, t[i]); |
| 178 | + ASSERT_EQ(res, UMF_RESULT_SUCCESS); |
| 179 | + } |
| 180 | + } |
| 181 | + |
| 182 | + umfPoolDestroy(pool); |
| 183 | + umfMemoryProviderDestroy(os_memory_provider); |
| 184 | +} |
| 185 | + |
| 186 | +TEST_F(test, pool_tbb_MallocPool_simple2) { |
| 187 | + umf_memory_provider_handle_t os_memory_provider; |
| 188 | + umfMemoryProviderCreate(&UMF_OS_MEMORY_PROVIDER_OPS, |
| 189 | + &UMF_OS_MEMORY_PROVIDER_PARAMS, |
| 190 | + &os_memory_provider); |
| 191 | + ASSERT_NE(os_memory_provider, nullptr); |
| 192 | + |
| 193 | + umf_memory_pool_handle_t pool; |
| 194 | + umfPoolCreate(&UMF_TBB_POOL_OPS, os_memory_provider, NULL, &pool); |
| 195 | + ASSERT_NE(pool, nullptr); |
| 196 | + |
| 197 | + // test |
| 198 | + double sizes[] = {2, 4, 0.5, 1, 8, 0.25}; |
| 199 | + for (int i = 0; i < 6; i++) { |
| 200 | + size_t s = (size_t)(sizes[i] * MB); |
| 201 | + void *t[8] = {0}; |
| 202 | + for (int j = 0; j < 8; j++) { |
| 203 | + t[j] = umfPoolMalloc(pool, s); |
| 204 | + ASSERT_NE(t[j], nullptr); |
| 205 | + } |
| 206 | + |
| 207 | + for (int j = 0; j < 8; j++) { |
| 208 | + umf_result_t res = umfPoolFree(pool, t[j]); |
| 209 | + ASSERT_EQ(res, UMF_RESULT_SUCCESS); |
| 210 | + } |
| 211 | + } |
| 212 | + |
| 213 | + umfPoolDestroy(pool); |
| 214 | + umfMemoryProviderDestroy(os_memory_provider); |
| 215 | +} |
| 216 | + |
| 217 | +TEST_F(test, pool_tbb_MallocPool_random) { |
| 218 | + umf_memory_provider_handle_t os_memory_provider; |
| 219 | + umfMemoryProviderCreate(&UMF_OS_MEMORY_PROVIDER_OPS, |
| 220 | + &UMF_OS_MEMORY_PROVIDER_PARAMS, |
| 221 | + &os_memory_provider); |
| 222 | + ASSERT_NE(os_memory_provider, nullptr); |
| 223 | + |
| 224 | + umf_memory_pool_handle_t pool; |
| 225 | + umfPoolCreate(&UMF_TBB_POOL_OPS, os_memory_provider, NULL, &pool); |
| 226 | + ASSERT_NE(pool, nullptr); |
| 227 | + |
| 228 | + // set constant seed so each test run will have the same scenario |
| 229 | + uint32_t seed = 1234; |
| 230 | + std::mt19937 mt(seed); |
| 231 | + |
| 232 | + // different sizes to alloc |
| 233 | + std::vector<size_t> sizes = { |
| 234 | + 15, 49, 588, 1025, 2 * KB, 5 * KB, |
| 235 | + 160 * KB, 511 * KB, 1000 * KB, MB, 3 * MB, 7 * MB, |
| 236 | + 19 * MB, 26 * MB, 69 * MB, 109 * MB, 111 * MB}; |
| 237 | + std::uniform_int_distribution<int> sizes_dist(0, (int)(sizes.size() - 1)); |
| 238 | + |
| 239 | + // each alloc would be done few times |
| 240 | + std::vector<size_t> counts = {1, 3, 4, 8, 9, 11}; |
| 241 | + std::uniform_int_distribution<int> counts_dist(0, (int)(counts.size() - 1)); |
| 242 | + |
| 243 | + // action to take will be random |
| 244 | + // alloc = <0, .5), free = <.5, 1) |
| 245 | + std::uniform_real_distribution<float> actions_dist(0, 1); |
| 246 | + |
| 247 | + std::set<void *> allocs; |
| 248 | + for (size_t i = 0; i < 100; i++) { |
| 249 | + size_t size = sizes[sizes_dist(mt)]; |
| 250 | + size_t count = counts[counts_dist(mt)]; |
| 251 | + float action = actions_dist(mt); |
| 252 | + |
| 253 | + std::cout << "size: " << size << " count: " << count |
| 254 | + << " action: " << ((action < 0.5) ? "alloc" : "free") |
| 255 | + << std::endl; |
| 256 | + |
| 257 | + if (action < 0.5) { |
| 258 | + // alloc |
| 259 | + for (size_t j = 0; j < count; j++) { |
| 260 | + void *ptr = umfPoolMalloc(pool, size); |
| 261 | + ASSERT_NE(ptr, nullptr); |
| 262 | + |
| 263 | + allocs.insert(ptr); |
| 264 | + } |
| 265 | + } else { |
| 266 | + // free random allocs |
| 267 | + for (size_t j = 0; j < count; j++) { |
| 268 | + if (allocs.size() == 0) { |
| 269 | + continue; |
| 270 | + } |
| 271 | + |
| 272 | + std::uniform_int_distribution<int> free_dist( |
| 273 | + 0, (int)(allocs.size() - 1)); |
| 274 | + size_t free_id = free_dist(mt); |
| 275 | + auto it = allocs.begin(); |
| 276 | + std::advance(it, free_id); |
| 277 | + void *ptr = (*it); |
| 278 | + ASSERT_NE(ptr, nullptr); |
| 279 | + |
| 280 | + umf_result_t ret = umfPoolFree(pool, ptr); |
| 281 | + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); |
| 282 | + |
| 283 | + allocs.erase(ptr); |
| 284 | + } |
| 285 | + } |
| 286 | + } |
| 287 | + |
| 288 | + std::cout << "cleanup" << std::endl; |
| 289 | + |
| 290 | + while (allocs.size()) { |
| 291 | + umf_result_t ret = umfPoolFree(pool, *allocs.begin()); |
| 292 | + ASSERT_EQ(ret, UMF_RESULT_SUCCESS); |
| 293 | + allocs.erase(allocs.begin()); |
| 294 | + } |
| 295 | + |
| 296 | + umfPoolDestroy(pool); |
| 297 | + umfMemoryProviderDestroy(os_memory_provider); |
| 298 | +} |