Skip to content

refactor numanode assert in tests #664

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Aug 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 0 additions & 34 deletions test/common/numa_helpers.h

This file was deleted.

50 changes: 50 additions & 0 deletions test/common/numa_helpers.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
// Copyright (C) 2024 Intel Corporation
// Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#ifndef UMF_TEST_NUMA_HELPERS_HPP
#define UMF_TEST_NUMA_HELPERS_HPP 1

#include <gtest/gtest.h>
#include <numa.h>
#include <numaif.h>
#include <stdint.h>
#include <stdio.h>

#include "test_helpers.h"

// Resolves the NUMA node backing the page that starts at 'ptr' and writes
// it to '*node'. On any failure a fatal gtest assertion is raised and
// '*node' is left untouched, so callers must wrap the call in
// ASSERT_NO_FATAL_FAILURE (or check testing::Test::HasFatalFailure()).
static inline void getNumaNodeByPtr(void *ptr, int *node) {
    int resolved;
    // MPOL_F_NODE | MPOL_F_ADDR makes get_mempolicy() report the node
    // that holds the page containing 'ptr' instead of the policy itself.
    int status =
        get_mempolicy(&resolved, nullptr, 0, ptr, MPOL_F_ADDR | MPOL_F_NODE);

    ASSERT_EQ(status, 0) << "get_mempolicy failed";
    ASSERT_GE(resolved, 0)
        << "get_mempolicy returned nodeId < 0 - should never happen";

    *node = resolved;
}

// Compares the NUMA node of the page under 'ptr' with 'nodeId'.
// When 'fatal' is true a mismatch is reported with ASSERT_EQ (aborts the
// calling function), otherwise with EXPECT_EQ (non-fatal). Intended to be
// used through the ASSERT_NODE_EQ / EXPECT_NODE_EQ macros.
static inline void _assertNode(void *ptr, int nodeId, bool fatal) {
    int actual = -1;

    getNumaNodeByPtr(ptr, &actual);
    // The lookup itself may have raised a fatal failure; in that case
    // 'actual' was never written, so skip the comparison entirely.
    if (testing::Test::HasFatalFailure()) {
        return;
    }
    if (!fatal) {
        EXPECT_EQ(nodeId, actual);
        return;
    }
    ASSERT_EQ(nodeId, actual);
}

// Asserts (fatally) that the given nodeId equals the node where ptr resides.
// Wrapped in ASSERT_NO_FATAL_FAILURE so a fatal failure inside the helper
// also aborts the test body that used the macro.
#define ASSERT_NODE_EQ(ptr, nodeId) \
    ASSERT_NO_FATAL_FAILURE(_assertNode(ptr, nodeId, true))

// Non-fatal variant: records a mismatch with EXPECT_EQ and lets the test
// continue (the wrapper only propagates fatal failures from the lookup).
#define EXPECT_NODE_EQ(ptr, nodeId) \
    ASSERT_NO_FATAL_FAILURE(_assertNode(ptr, nodeId, false))

#endif /* UMF_TEST_NUMA_HELPERS_HPP */
7 changes: 5 additions & 2 deletions test/memspaces/memspace_highest_capacity.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
#include "memspace_helpers.hpp"
#include "memspace_internal.h"
#include "memtarget_numa.h"
#include "numa_helpers.h"
#include "numa_helpers.hpp"
#include "test_helpers.h"

#include <numa.h>
Expand Down Expand Up @@ -60,7 +60,10 @@ TEST_F(memspaceHighestCapacityProviderTest, highestCapacityVerify) {
memset(ptr, 0, alloc_size);
ASSERT_EQ(ret, UMF_RESULT_SUCCESS);

auto nodeId = getNumaNodeByPtr(ptr);
int nodeId;

ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr(ptr, &nodeId));

ASSERT_TRUE(std::any_of(maxCapacityNodes.begin(), maxCapacityNodes.end(),
[nodeId](int node) { return nodeId == node; }));

Expand Down
2 changes: 1 addition & 1 deletion test/memspaces/memspace_host_all.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
#include "memspace_helpers.hpp"
#include "memspace_internal.h"
#include "memtarget_numa.h"
#include "numa_helpers.h"
#include "numa_helpers.hpp"
#include "test_helpers.h"
#include "utils_sanitizers.h"

Expand Down
53 changes: 24 additions & 29 deletions test/provider_os_memory_multiple_numa_nodes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "base.hpp"
#include "numa_helpers.h"
#include "numa_helpers.hpp"
#include "test_helpers.h"

#include <algorithm>
Expand Down Expand Up @@ -122,14 +122,10 @@ struct testNuma : testing::Test {
};

struct testNumaOnEachNode : testNuma, testing::WithParamInterface<unsigned> {};
struct testNumaOnEachCpu : testNuma, testing::WithParamInterface<int> {};

INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocations, testNumaOnEachNode,
::testing::ValuesIn(get_available_numa_nodes()));

INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocationsAllCpus, testNumaOnEachCpu,
::testing::ValuesIn(get_available_cpus()));

// Test for allocations on numa nodes. It will be executed on each of
// the available numa nodes.
TEST_P(testNumaOnEachNode, checkNumaNodesAllocations) {
Expand All @@ -152,8 +148,7 @@ TEST_P(testNumaOnEachNode, checkNumaNodesAllocations) {

// 'ptr' must point to an initialized value before retrieving its numa node
memset(ptr, 0xFF, alloc_size);
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
EXPECT_NODE_EQ(ptr, numa_node_number);
}

// Test for allocations on numa nodes with mode preferred. It will be executed
Expand All @@ -177,8 +172,7 @@ TEST_P(testNumaOnEachNode, checkModePreferred) {

// 'ptr' must point to an initialized value before retrieving its numa node
memset(ptr, 0xFF, alloc_size);
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
EXPECT_NODE_EQ(ptr, numa_node_number);
}

// Test for allocation on numa node with default mode enabled.
Expand All @@ -202,8 +196,7 @@ TEST_P(testNumaOnEachNode, checkModeDefaultSetMempolicy) {

// 'ptr' must point to an initialized value before retrieving its numa node
memset(ptr, 0xFF, alloc_size);
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
EXPECT_NODE_EQ(ptr, numa_node_number);
}

// Test for allocations on a single numa node with interleave mode enabled.
Expand All @@ -229,10 +222,14 @@ TEST_P(testNumaOnEachNode, checkModeInterleaveSingleNode) {

// 'ptr' must point to an initialized value before retrieving its numa node
memset(ptr, 0xFF, pages_num * page_size);
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
EXPECT_NODE_EQ(ptr, numa_node_number);
}

struct testNumaOnEachCpu : testNuma, testing::WithParamInterface<int> {};

INSTANTIATE_TEST_SUITE_P(testNumaNodesAllocationsAllCpus, testNumaOnEachCpu,
::testing::ValuesIn(get_available_cpus()));

// Test for allocation on numa node with mode preferred and an empty nodeset.
// For the empty nodeset the memory is allocated on the node of the CPU that
// triggered the allocation. It will be executed on each available CPU.
Expand Down Expand Up @@ -269,8 +266,7 @@ TEST_P(testNumaOnEachCpu, checkModePreferredEmptyNodeset) {

// 'ptr' must point to an initialized value before retrieving its numa node
memset(ptr, 0xFF, alloc_size);
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
EXPECT_NODE_EQ(ptr, numa_node_number);
}

// Test for allocation on numa node with local mode enabled. The memory is
Expand Down Expand Up @@ -307,8 +303,7 @@ TEST_P(testNumaOnEachCpu, checkModeLocal) {

// 'ptr' must point to an initialized value before retrieving its numa node
memset(ptr, 0xFF, alloc_size);
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
EXPECT_NODE_EQ(ptr, numa_node_number);
}

// Test for allocation on numa node with default mode enabled.
Expand All @@ -332,8 +327,7 @@ TEST_F(testNuma, checkModeDefault) {

// 'ptr' must point to an initialized value before retrieving its numa node
memset(ptr, 0xFF, alloc_size);
int retrieved_numa_node_number = getNumaNodeByPtr(ptr);
EXPECT_EQ(retrieved_numa_node_number, numa_node_number);
EXPECT_NODE_EQ(ptr, numa_node_number);
}

// Test for allocations on numa nodes with interleave mode enabled.
Expand Down Expand Up @@ -363,11 +357,11 @@ TEST_F(testNuma, checkModeInterleave) {

// Test where each page will be allocated.
// Get the first numa node for ptr; Each next page is expected to be on next nodes.
size_t index = getNumaNodeByPtr((char *)ptr);
int index = 0;
ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr(ptr, &index));
for (size_t i = 1; i < (size_t)pages_num; i++) {
index = (index + 1) % numa_nodes.size();
ASSERT_EQ(numa_nodes[index],
getNumaNodeByPtr((char *)ptr + page_size * i));
EXPECT_NODE_EQ((char *)ptr + page_size * i, numa_nodes[index]);
}

bitmask *retrieved_nodemask = retrieve_nodemask(ptr);
Expand Down Expand Up @@ -407,13 +401,11 @@ TEST_F(testNuma, checkModeInterleaveCustomPartSize) {
memset(ptr, 0xFF, size);
// Test where each page will be allocated.
// Get the first numa node for ptr; Each next part is expected to be on next nodes.
size_t index = getNumaNodeByPtr((char *)ptr);
int index = 0;
ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr(ptr, &index));
for (size_t i = 0; i < (size_t)part_num; i++) {
for (size_t j = 0; j < part_size; j += page_size) {
EXPECT_EQ(numa_nodes[index],
getNumaNodeByPtr((char *)ptr + part_size * i + j))
<< "for ptr " << ptr << " + " << part_size << " * " << i
<< " + " << j;
ASSERT_NODE_EQ((char *)ptr + part_size * i + j, numa_nodes[index]);
}
index = (index + 1) % numa_nodes.size();
}
Expand All @@ -425,7 +417,7 @@ TEST_F(testNuma, checkModeInterleaveCustomPartSize) {
ASSERT_EQ(umf_result, UMF_RESULT_SUCCESS);
ASSERT_NE(ptr, nullptr);
memset(ptr, 0xFF, size);
EXPECT_EQ(numa_nodes[index], getNumaNodeByPtr(ptr));
EXPECT_NODE_EQ(ptr, numa_nodes[index]);
umfMemoryProviderFree(os_memory_provider, ptr, size);
}

Expand Down Expand Up @@ -627,7 +619,10 @@ TEST_F(testNuma, checkModeBindOnAllNodes) {

// 'ptr' must point to an initialized value before retrieving its numa node
memset(ptr, 0xFF, alloc_size);
unsigned retrieved_numa_node_number = (unsigned)getNumaNodeByPtr(ptr);

int node;
ASSERT_NO_FATAL_FAILURE(getNumaNodeByPtr(ptr, &node));
unsigned retrieved_numa_node_number = (unsigned)node;

int read_cpu = sched_getcpu();
int read_numa_node = numa_node_of_cpu(read_cpu);
Expand Down
Loading