Skip to content

Merge v0.10.x into main #985

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/reusable_gpu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -129,5 +129,5 @@ jobs:
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: ${{ matrix.build_type == 'Debug' && matrix.os == 'Ubuntu' }}
with:
name: ${{env.COVERAGE_NAME}}-${{matrix.os}}-${{matrix.build_type}}-shared-${{matrix.shared_library}}
name: ${{env.COVERAGE_NAME}}-shared-${{matrix.shared_library}}
path: ${{env.COVERAGE_DIR}}
6 changes: 3 additions & 3 deletions .github/workflows/reusable_multi_numa.yml
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ jobs:
--gtest_filter="-*checkModeLocal/*:*checkModePreferredEmptyNodeset/*:testNuma.checkModeInterleave"

- name: Check coverage
if: matrix.os == 'ubuntu-22.04'
if: ${{ matrix.build_type == 'Debug' && matrix.os == 'ubuntu-22.04' }}
working-directory: ${{env.BUILD_DIR}}
run: |
export COVERAGE_FILE_NAME=${{env.COVERAGE_NAME}}-${{matrix.os}}-shared-${{matrix.shared_library}}
Expand All @@ -79,7 +79,7 @@ jobs:
mv ./$COVERAGE_FILE_NAME ${{env.COVERAGE_DIR}}

- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
if: matrix.os == 'ubuntu-22.04'
if: ${{ matrix.build_type == 'Debug' && matrix.os == 'ubuntu-22.04' }}
with:
name: ${{env.COVERAGE_NAME}}-${{matrix.os}}-${{matrix.build_type}}-shared-${{matrix.shared_library}}
name: ${{env.COVERAGE_NAME}}-${{matrix.os}}-shared-${{matrix.shared_library}}
path: ${{env.COVERAGE_DIR}}
4 changes: 2 additions & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -265,8 +265,8 @@ else()
./configure --prefix=${hwloc_targ_BINARY_DIR}
--enable-static=yes --enable-shared=no --disable-libxml2
--disable-pci --disable-levelzero --disable-opencl
--disable-cuda --disable-nvml --disable-libudev CFLAGS=-fPIC
CXXFLAGS=-fPIC
--disable-cuda --disable-nvml --disable-libudev --disable-rsmi
CFLAGS=-fPIC CXXFLAGS=-fPIC
WORKING_DIRECTORY ${hwloc_targ_SOURCE_DIR}
OUTPUT ${hwloc_targ_SOURCE_DIR}/Makefile
DEPENDS ${hwloc_targ_SOURCE_DIR}/configure)
Expand Down
23 changes: 23 additions & 0 deletions ChangeLog
Original file line number Diff line number Diff line change
@@ -1,3 +1,26 @@
Mon Dec 09 2024 Łukasz Stolarczuk <[email protected]>

* Version 0.10.0

In this release we introduced updates in several areas, listed below.
We do not yet guarantee a fully stable API, though.
With the new parameters API we broke backward compatibility, as we no longer
support direct access to UMF params via (now internal) structures.

Significant updates:
- updated Level Zero Provider
- new API to handle UMF parameters (replacing the previous structs)
- extended IPC API testing
- new Memtarget and Memspace API

Minor updates:
- multiple fixes in the source code
- extended code coverage reporting
- improved CI and testing
- new examples
- extended logging
- yet more fixes in the building system

Tue Nov 12 2024 Łukasz Stolarczuk <[email protected]>

* Version 0.9.1
Expand Down
9 changes: 8 additions & 1 deletion examples/ipc_ipcapi/ipc_ipcapi_consumer.c
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,13 @@ int main(int argc, char *argv[]) {
goto err_destroy_OS_memory_provider;
}

umf_ipc_handler_handle_t ipc_handler;
umf_result = umfPoolGetIPCHandler(scalable_pool, &ipc_handler);
if (umf_result != UMF_RESULT_SUCCESS) {
        fprintf(stderr, "[consumer] ERROR: get IPC handler failed\n");
goto err_destroy_scalable_pool;
}

// connect to the producer
producer_socket = consumer_connect_to_producer(port);
if (producer_socket < 0) {
Expand Down Expand Up @@ -209,7 +216,7 @@ int main(int argc, char *argv[]) {
len);

void *SHM_ptr;
umf_result = umfOpenIPCHandle(scalable_pool, IPC_handle, &SHM_ptr);
umf_result = umfOpenIPCHandle(ipc_handler, IPC_handle, &SHM_ptr);
if (umf_result == UMF_RESULT_ERROR_NOT_SUPPORTED) {
fprintf(stderr,
"[consumer] SKIP: opening the IPC handle is not supported\n");
Expand Down
11 changes: 9 additions & 2 deletions examples/ipc_level_zero/ipc_level_zero.c
Original file line number Diff line number Diff line change
Expand Up @@ -180,14 +180,21 @@ int main(void) {

fprintf(stdout, "Consumer pool created.\n");

umf_ipc_handler_handle_t ipc_handler = 0;
umf_result = umfPoolGetIPCHandler(consumer_pool, &ipc_handler);
if (umf_result != UMF_RESULT_SUCCESS) {
fprintf(stderr, "ERROR: Failed to get IPC handler!\n");
return -1;
}

void *mapped_buf = NULL;
umf_result = umfOpenIPCHandle(consumer_pool, ipc_handle, &mapped_buf);
umf_result = umfOpenIPCHandle(ipc_handler, ipc_handle, &mapped_buf);
if (umf_result != UMF_RESULT_SUCCESS) {
fprintf(stderr, "ERROR: Failed to open IPC handle!\n");
return -1;
}

fprintf(stdout, "IPC handle opened in the consumer pool.\n");
fprintf(stdout, "IPC handle opened.\n");

size_t *tmp_buf = malloc(BUFFER_SIZE);
ret = level_zero_copy(consumer_context, device, tmp_buf, mapped_buf,
Expand Down
13 changes: 11 additions & 2 deletions include/umf/ipc.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@ extern "C" {

typedef struct umf_ipc_data_t *umf_ipc_handle_t;

typedef void *umf_ipc_handler_handle_t;

///
/// @brief Returns the size of IPC handles for the specified pool.
/// @param hPool [in] Pool handle
Expand All @@ -44,11 +46,11 @@ umf_result_t umfPutIPCHandle(umf_ipc_handle_t ipcHandle);

///
/// @brief Open IPC handle retrieved by umfGetIPCHandle.
/// @param hPool [in] Pool handle where to open the the IPC handle.
/// @param hIPCHandler [in] IPC Handler handle used to open the IPC handle.
/// @param ipcHandle [in] IPC handle.
/// @param ptr [out] pointer to the memory in the current process.
/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
umf_result_t umfOpenIPCHandle(umf_memory_pool_handle_t hPool,
umf_result_t umfOpenIPCHandle(umf_ipc_handler_handle_t hIPCHandler,
umf_ipc_handle_t ipcHandle, void **ptr);

///
Expand All @@ -57,6 +59,13 @@ umf_result_t umfOpenIPCHandle(umf_memory_pool_handle_t hPool,
/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
umf_result_t umfCloseIPCHandle(void *ptr);

/// @brief Get handle to the IPC handler from existing pool.
/// @param hPool [in] Pool handle
/// @param hIPCHandler [out] handle to the IPC handler
/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
umf_result_t umfPoolGetIPCHandler(umf_memory_pool_handle_t hPool,
umf_ipc_handler_handle_t *hIPCHandler);

#ifdef __cplusplus
}
#endif
Expand Down
Loading
Loading