use unique_ptr instead of constructor/destructor in benchmarks #984

Merged
merged 1 commit into from Dec 11, 2024
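This PR replaces the hand-written constructor/destructor pairs the benchmark fixtures used to create and destroy UMF parameter handles with a getParams(state) method that returns a type-erased std::unique_ptr (the params_ptr alias, std::unique_ptr<void, void (*)(void *)>), so the handle is released automatically and setup failures can call state.SkipWithError() instead of leaving half-initialized members behind. Below is a minimal, self-contained sketch of that ownership pattern; the widget_* C-style API is invented for illustration and is not part of UMF.

#include <cstdio>
#include <memory>

// Hypothetical C-style API standing in for umf*ParamsCreate / umf*ParamsDestroy.
struct widget {
    int value = 0;
};
widget *widget_create() { return new widget{}; }
void widget_destroy(widget *w) { delete w; }

// Type-erased owner with the same shape as the params_ptr alias added by this PR.
using params_ptr = std::unique_ptr<void, void (*)(void *)>;

params_ptr make_params() {
    widget *raw = widget_create();
    if (raw == nullptr) {
        return {nullptr, [](void *) {}}; // empty owner with a no-op deleter
    }
    // A capture-less lambda converts to void (*)(void *) and restores the real type.
    return {static_cast<void *>(raw),
            [](void *p) { widget_destroy(static_cast<widget *>(p)); }};
}

int main() {
    auto params = make_params();
    std::printf("created params: %s\n", params ? "yes" : "no");
    return 0; // widget_destroy runs automatically when params goes out of scope
}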
111 changes: 50 additions & 61 deletions benchmark/benchmark.cpp
@@ -37,20 +37,27 @@ struct glibc_malloc : public allocator_interface {
 };
 
 struct os_provider : public provider_interface {
-    umf_os_memory_provider_params_handle_t params = NULL;
-    os_provider() {
-        umfOsMemoryProviderParamsCreate(&params);
-        return;
-    }
-
-    ~os_provider() {
-        if (params != NULL) {
-            umfOsMemoryProviderParamsDestroy(params);
+    provider_interface::params_ptr
+    getParams(::benchmark::State &state) override {
+        umf_os_memory_provider_params_handle_t raw_params = nullptr;
+        umfOsMemoryProviderParamsCreate(&raw_params);
+        if (!raw_params) {
+            state.SkipWithError("Failed to create os provider params");
+            return {nullptr, [](void *) {}};
         }
+
+        // Use a lambda as the custom deleter
+        auto deleter = [](void *p) {
+            auto handle =
+                static_cast<umf_os_memory_provider_params_handle_t>(p);
+            umfOsMemoryProviderParamsDestroy(handle);
+        };
+
+        return {static_cast<void *>(raw_params), deleter};
     }
 
-    void *getParams() override { return params; }
-    umf_memory_provider_ops_t *getOps() override {
+    umf_memory_provider_ops_t *
+    getOps([[maybe_unused]] ::benchmark::State &state) override {
         return umfOsMemoryProviderOps();
     }
     static std::string name() { return "os_provider"; }
@@ -62,73 +69,60 @@ struct proxy_pool : public pool_interface<Provider> {
     getOps([[maybe_unused]] ::benchmark::State &state) override {
         return umfProxyPoolOps();
     }
-    void *getParams([[maybe_unused]] ::benchmark::State &state) override {
-        return nullptr;
-    }
 
     static std::string name() { return "proxy_pool<" + Provider::name() + ">"; }
 };
 
 #ifdef UMF_POOL_DISJOINT_ENABLED
 template <typename Provider>
 struct disjoint_pool : public pool_interface<Provider> {
-    umf_disjoint_pool_params_handle_t disjoint_memory_pool_params;
+    umf_memory_pool_ops_t *
+    getOps([[maybe_unused]] ::benchmark::State &state) override {
+        return umfDisjointPoolOps();
+    }
 
-    disjoint_pool() {
-        disjoint_memory_pool_params = NULL;
-        auto ret = umfDisjointPoolParamsCreate(&disjoint_memory_pool_params);
+    typename pool_interface<Provider>::params_ptr
+    getParams(::benchmark::State &state) override {
+        umf_disjoint_pool_params_handle_t raw_params = nullptr;
+        auto ret = umfDisjointPoolParamsCreate(&raw_params);
         if (ret != UMF_RESULT_SUCCESS) {
-            return;
+            state.SkipWithError("Failed to create disjoint pool params");
+            return {nullptr, [](void *) {}};
         }
 
-        // those function should never fail, so error handling is minimal.
-        ret = umfDisjointPoolParamsSetSlabMinSize(disjoint_memory_pool_params,
-                                                  4096);
-        if (ret != UMF_RESULT_SUCCESS) {
-            goto err;
-        }
+        typename pool_interface<Provider>::params_ptr params(
+            raw_params, [](void *p) {
+                umfDisjointPoolParamsDestroy(
+                    static_cast<umf_disjoint_pool_params_handle_t>(p));
+            });
 
-        ret = umfDisjointPoolParamsSetCapacity(disjoint_memory_pool_params, 4);
+        ret = umfDisjointPoolParamsSetSlabMinSize(raw_params, 4096);
         if (ret != UMF_RESULT_SUCCESS) {
-            goto err;
+            state.SkipWithError("Failed to set slab min size");
+            return {nullptr, [](void *) {}};
         }
 
-        ret = umfDisjointPoolParamsSetMinBucketSize(disjoint_memory_pool_params,
-                                                    4096);
+        ret = umfDisjointPoolParamsSetCapacity(raw_params, 4);
         if (ret != UMF_RESULT_SUCCESS) {
-            goto err;
+            state.SkipWithError("Failed to set capacity");
+            return {nullptr, [](void *) {}};
         }
 
-        ret = umfDisjointPoolParamsSetMaxPoolableSize(
-            disjoint_memory_pool_params, 4096 * 16);
-
+        ret = umfDisjointPoolParamsSetMinBucketSize(raw_params, 4096);
         if (ret != UMF_RESULT_SUCCESS) {
-            goto err;
+            state.SkipWithError("Failed to set min bucket size");
+            return {nullptr, [](void *) {}};
         }
-        return;
-    err:
+
-        umfDisjointPoolParamsDestroy(disjoint_memory_pool_params);
-        disjoint_memory_pool_params = NULL;
-    }
-
-    ~disjoint_pool() {
-        if (disjoint_memory_pool_params != NULL) {
-            umfDisjointPoolParamsDestroy(disjoint_memory_pool_params);
+        ret = umfDisjointPoolParamsSetMaxPoolableSize(raw_params, 4096 * 16);
+        if (ret != UMF_RESULT_SUCCESS) {
+            state.SkipWithError("Failed to set max poolable size");
+            return {nullptr, [](void *) {}};
         }
-    }
 
-    umf_memory_pool_ops_t *
-    getOps([[maybe_unused]] ::benchmark::State &state) override {
-        return umfDisjointPoolOps();
+        return params;
     }
-    void *getParams([[maybe_unused]] ::benchmark::State &state) override {
-
-        if (disjoint_memory_pool_params == NULL) {
-            state.SkipWithError("Failed to create disjoint pool params");
-        }
 
-        return disjoint_memory_pool_params;
-    }
     static std::string name() {
         return "disjoint_pool<" + Provider::name() + ">";
     }
@@ -142,9 +136,7 @@ struct jemalloc_pool : public pool_interface<Provider> {
     getOps([[maybe_unused]] ::benchmark::State &state) override {
         return umfJemallocPoolOps();
     }
-    void *getParams([[maybe_unused]] ::benchmark::State &state) override {
-        return NULL;
-    }
+
     static std::string name() {
         return "jemalloc_pool<" + Provider::name() + ">";
     }
@@ -158,10 +150,7 @@ struct scalable_pool : public pool_interface<Provider> {
     getOps([[maybe_unused]] ::benchmark::State &state) override {
         return umfScalablePoolOps();
     }
-    virtual void *
-    getParams([[maybe_unused]] ::benchmark::State &state) override {
-        return NULL;
-    }
+
     static std::string name() {
         return "scalable_pool<" + Provider::name() + ">";
     }
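One detail worth noting about the params_ptr alias these overrides return (defined in benchmark_interfaces.hpp below): its deleter type is a plain function pointer, void (*)(void *), so only capture-less lambdas or free functions can serve as deleters, since those convert implicitly to the function-pointer type; a capturing lambda would not compile. A small standalone sketch, using std::malloc/std::free purely for illustration:

#include <cstdlib>
#include <memory>

using params_ptr = std::unique_ptr<void, void (*)(void *)>;

int main() {
    // OK: a capture-less lambda converts implicitly to void (*)(void *).
    params_ptr ok{std::malloc(16), [](void *p) { std::free(p); }};

    // Would not compile: a capturing lambda has no conversion to a function pointer.
    // int limit = 16;
    // params_ptr bad{std::malloc(limit), [limit](void *p) { std::free(p); }};

    return ok ? 0 : 1;
}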
8 changes: 8 additions & 0 deletions benchmark/benchmark.hpp
@@ -232,12 +232,14 @@ class alloc_benchmark : public benchmark_interface<Size, Alloc> {
             state.ResumeTiming();
         }
     }
+
     static std::vector<std::string> argsName() {
         auto n = benchmark_interface<Size, Alloc>::argsName();
         std::vector<std::string> res = {"max_allocs", "pre_allocs"};
         res.insert(res.end(), n.begin(), n.end());
         return res;
     }
+
     static std::string name() { return base::name() + "/alloc"; }
     static int64_t iterations() { return 200000; }
 
@@ -320,13 +322,16 @@ class multiple_malloc_free_benchmark : public alloc_benchmark<Size, Alloc> {
     static std::string name() {
         return base::base::name() + "/multiple_malloc_free";
     }
+
     static std::vector<std::string> argsName() {
         auto n = benchmark_interface<Size, Alloc>::argsName();
         std::vector<std::string> res = {"max_allocs"};
         res.insert(res.end(), n.begin(), n.end());
         return res;
     }
+
     static int64_t iterations() { return 2000; }
+
     std::default_random_engine generator;
     distribution dist;
 };
@@ -352,9 +357,11 @@ class provider_allocator : public allocator_interface {
         }
         return ptr;
     }
+
    void benchFree(void *ptr, size_t size) override {
         umfMemoryProviderFree(provider.provider, ptr, size);
     }
+
     static std::string name() { return Provider::name(); }
 
   private:
@@ -374,6 +381,7 @@ template <typename Pool> class pool_allocator : public allocator_interface {
     virtual void *benchAlloc(size_t size) override {
         return umfPoolMalloc(pool.pool, size);
     }
+
     virtual void benchFree(void *ptr, [[maybe_unused]] size_t size) override {
         umfPoolFree(pool.pool, ptr);
     }
27 changes: 21 additions & 6 deletions benchmark/benchmark_interfaces.hpp
@@ -6,6 +6,7 @@
  *
  */
 
+#include <memory>
 #include <string>
 #include <thread>
 #include <vector>
@@ -39,6 +40,7 @@ struct benchmark_interface : public benchmark::Fixture {
         int argPos = alloc_size.SetUp(state, 0);
         allocator.SetUp(state, argPos);
     }
+
     void TearDown(::benchmark::State &state) {
         alloc_size.TearDown(state);
         allocator.TearDown(state);
@@ -54,20 +56,24 @@
         res.insert(res.end(), a.begin(), a.end());
         return res;
     }
+
     static std::string name() { return Allocator::name(); }
     static int64_t iterations() { return 10000; }
     Size alloc_size;
     Allocator allocator;
 };
 
 struct provider_interface {
+    using params_ptr = std::unique_ptr<void, void (*)(void *)>;
+
     umf_memory_provider_handle_t provider = NULL;
     virtual void SetUp(::benchmark::State &state) {
         if (state.thread_index() != 0) {
             return;
         }
+        auto params = getParams(state);
         auto umf_result =
-            umfMemoryProviderCreate(getOps(), getParams(), &provider);
+            umfMemoryProviderCreate(getOps(state), params.get(), &provider);
         if (umf_result != UMF_RESULT_SUCCESS) {
             state.SkipWithError("umfMemoryProviderCreate() failed");
         }
@@ -83,21 +89,30 @@ struct provider_interface {
         }
     }
 
-    virtual umf_memory_provider_ops_t *getOps() { return nullptr; }
-    virtual void *getParams() { return nullptr; }
+    virtual umf_memory_provider_ops_t *
+    getOps([[maybe_unused]] ::benchmark::State &state) {
+        return nullptr;
+    }
+
+    virtual params_ptr getParams([[maybe_unused]] ::benchmark::State &state) {
+        return {nullptr, [](void *) {}};
+    }
 };
 
 template <typename T,
           typename =
               std::enable_if_t<std::is_base_of<provider_interface, T>::value>>
 struct pool_interface {
+    using params_ptr = std::unique_ptr<void, void (*)(void *)>;
+
     virtual void SetUp(::benchmark::State &state) {
         provider.SetUp(state);
         if (state.thread_index() != 0) {
             return;
         }
+        auto params = getParams(state);
         auto umf_result = umfPoolCreate(getOps(state), provider.provider,
-                                        getParams(state), 0, &pool);
+                                        params.get(), 0, &pool);
         if (umf_result != UMF_RESULT_SUCCESS) {
             state.SkipWithError("umfPoolCreate() failed");
         }
@@ -121,8 +136,8 @@ struct pool_interface {
     getOps([[maybe_unused]] ::benchmark::State &state) {
         return nullptr;
     }
-    virtual void *getParams([[maybe_unused]] ::benchmark::State &state) {
-        return nullptr;
+    virtual params_ptr getParams([[maybe_unused]] ::benchmark::State &state) {
+        return {nullptr, [](void *) {}};
     }
     T provider;
     umf_memory_pool_handle_t pool;
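Because getParams(state) now returns an owning params_ptr, the parameter handle in SetUp lives only as long as the local params variable: it is destroyed when SetUp returns, right after umfMemoryProviderCreate / umfPoolCreate has consumed it, which assumes the create call does not need the params object afterwards. A toy sketch of that scope-based ordering (create_from and the int payload are stand-ins, not UMF calls):

#include <cstdio>
#include <memory>

using params_ptr = std::unique_ptr<void, void (*)(void *)>;

// Stand-in for umfMemoryProviderCreate / umfPoolCreate: only reads the raw handle.
void create_from(void *raw) { std::printf("create sees %p\n", raw); }

void setup_like() {
    params_ptr params{new int(7), [](void *p) {
                          std::printf("destroying params\n");
                          delete static_cast<int *>(p);
                      }};
    create_from(params.get()); // provider/pool would be created here
} // params is destroyed here, after the create call has returned

int main() { setup_like(); }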