|
// Copyright (C) 2025 Intel Corporation
// Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM
// Exceptions. See LICENSE.TXT
//
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "common.h"
#include <cuda.h>

// Test using CUDA-Graph to add commands to a native CUDA command-buffer.
| 11 | +struct urCudaCommandBufferNativeAppendTest |
| 12 | + : uur::command_buffer::urCommandBufferNativeAppendTest { |
| 13 | + void SetUp() override { |
| 14 | + UUR_RETURN_ON_FATAL_FAILURE( |
| 15 | + uur::command_buffer::urCommandBufferNativeAppendTest::SetUp()); |
| 16 | + if (backend != UR_PLATFORM_BACKEND_CUDA) { |
| 17 | + GTEST_SKIP() << "Native append test is only supported on CUDA."; |
| 18 | + } |
| 19 | + |
| 20 | + // CUDA-Graph supports adds sub-graph nodes to a parent graph |
| 21 | + ur_bool_t subgraph_support = false; |
| 22 | + EXPECT_SUCCESS(urDeviceGetInfo( |
| 23 | + device, UR_DEVICE_INFO_COMMAND_BUFFER_SUBGRAPH_SUPPORT_EXP, |
| 24 | + sizeof(ur_bool_t), &subgraph_support, nullptr)); |
| 25 | + EXPECT_TRUE(subgraph_support); |
| 26 | + |
| 27 | + // Create a non-updatable graph as a child graph |
| 28 | + ur_exp_command_buffer_desc_t desc{ |
| 29 | + UR_STRUCTURE_TYPE_EXP_COMMAND_BUFFER_DESC /*stype*/, nullptr /*pnext*/, |
| 30 | + false /* updatable */, false /* in-order */, false /* profilable*/ |
| 31 | + }; |
| 32 | + UUR_RETURN_ON_FATAL_FAILURE( |
| 33 | + urCommandBufferCreateExp(context, device, &desc, &child_cmd_buf)); |
| 34 | + } |
| 35 | + |
| 36 | + void TearDown() override { |
| 37 | + if (child_cmd_buf) { |
| 38 | + EXPECT_SUCCESS(urCommandBufferReleaseExp(child_cmd_buf)); |
| 39 | + } |
| 40 | + UUR_RETURN_ON_FATAL_FAILURE( |
| 41 | + uur::command_buffer::urCommandBufferNativeAppendTest::TearDown()); |
| 42 | + } |
| 43 | + |
| 44 | + ur_exp_command_buffer_handle_t child_cmd_buf = nullptr; |
| 45 | +}; |

// Instantiate the fixture once per available adapter/device combination.
UUR_INSTANTIATE_DEVICE_TEST_SUITE(urCudaCommandBufferNativeAppendTest);
| 48 | + |
| 49 | +namespace { |
| 50 | +struct InteropData { |
| 51 | + ur_exp_command_buffer_handle_t command_buffer; |
| 52 | + ur_context_handle_t context; |
| 53 | + void *src; |
| 54 | + void *dst; |
| 55 | +}; |
| 56 | + |
| 57 | +// Native command-buffer command is a single USM device pointer copy command |
| 58 | +void interop_func(void *data) { |
| 59 | + InteropData *func_data = reinterpret_cast<InteropData *>(data); |
| 60 | + ASSERT_NE(nullptr, func_data); |
| 61 | + |
| 62 | + CUgraph native_graph{}; |
| 63 | + ASSERT_SUCCESS(urCommandBufferGetNativeHandleExp( |
| 64 | + func_data->command_buffer, (ur_native_handle_t *)&native_graph)); |
| 65 | + ASSERT_NE(CUgraph{}, native_graph); |
| 66 | + |
| 67 | + CUcontext native_context{}; |
| 68 | + ASSERT_SUCCESS(urContextGetNativeHandle( |
| 69 | + func_data->context, (ur_native_handle_t *)&native_context)); |
| 70 | + ASSERT_NE(CUcontext{}, native_context); |
| 71 | + |
| 72 | + CUDA_MEMCPY3D params{}; |
| 73 | + params.srcMemoryType = CU_MEMORYTYPE_DEVICE; |
| 74 | + params.srcDevice = (CUdeviceptr)func_data->src; |
| 75 | + params.srcHost = nullptr; |
| 76 | + params.dstMemoryType = CU_MEMORYTYPE_DEVICE; |
| 77 | + params.dstDevice = (CUdeviceptr)func_data->dst; |
| 78 | + params.dstHost = nullptr; |
| 79 | + params.WidthInBytes = |
| 80 | + uur::command_buffer::urCommandBufferNativeAppendTest::allocation_size; |
| 81 | + params.Height = 1; |
| 82 | + params.Depth = 1; |
| 83 | + |
| 84 | + CUgraphNode node; |
| 85 | + auto res = cuGraphAddMemcpyNode(&node, native_graph, nullptr, 0, ¶ms, |
| 86 | + native_context); |
| 87 | + ASSERT_EQ(res, CUDA_SUCCESS); |
| 88 | +} |
| 89 | +} // end anonymous namespace |
| 90 | + |
| 91 | +// Test command-buffer with a single native command, which when enqueued has an |
| 92 | +// eager UR command as a predecessor and eager UR command as a successor. |
| 93 | +TEST_P(urCudaCommandBufferNativeAppendTest, Success) { |
| 94 | + InteropData data{child_cmd_buf, context, src_device_ptr, dst_device_ptr}; |
| 95 | + ASSERT_SUCCESS(urCommandBufferAppendNativeCommandExp( |
| 96 | + command_buffer, &interop_func, &data, child_cmd_buf, 0, nullptr, |
| 97 | + nullptr)); |
| 98 | + ASSERT_SUCCESS(urCommandBufferFinalizeExp(command_buffer)); |
| 99 | + |
| 100 | + ASSERT_SUCCESS(urEnqueueUSMFill(queue, src_device_ptr, sizeof(val), &val, |
| 101 | + allocation_size, 0, nullptr, nullptr)); |
| 102 | + ASSERT_SUCCESS( |
| 103 | + urEnqueueCommandBufferExp(queue, command_buffer, 0, nullptr, nullptr)); |
| 104 | + |
| 105 | + ASSERT_SUCCESS(urEnqueueUSMMemcpy(queue, true, host_vec.data(), |
| 106 | + dst_device_ptr, allocation_size, 0, nullptr, |
| 107 | + nullptr)); |
| 108 | + |
| 109 | + for (auto &i : host_vec) { |
| 110 | + ASSERT_EQ(i, val); |
| 111 | + } |
| 112 | +} |
| 113 | + |
| 114 | +// Test command-buffer native command with other command-buffer commands as |
| 115 | +// predecessors and successors |
| 116 | +TEST_P(urCudaCommandBufferNativeAppendTest, Dependencies) { |
| 117 | + ur_exp_command_buffer_sync_point_t sync_point_1; |
| 118 | + ASSERT_SUCCESS(urCommandBufferAppendUSMFillExp( |
| 119 | + command_buffer, src_device_ptr, &val, sizeof(val), allocation_size, 0, |
| 120 | + nullptr, 0, nullptr, &sync_point_1, nullptr, nullptr)); |
| 121 | + |
| 122 | + InteropData data{child_cmd_buf, context, src_device_ptr, dst_device_ptr}; |
| 123 | + ur_exp_command_buffer_sync_point_t sync_point_2; |
| 124 | + ASSERT_SUCCESS(urCommandBufferAppendNativeCommandExp( |
| 125 | + command_buffer, &interop_func, &data, child_cmd_buf, 1, &sync_point_1, |
| 126 | + &sync_point_2)); |
| 127 | + |
| 128 | + ASSERT_SUCCESS(urCommandBufferAppendUSMMemcpyExp( |
| 129 | + command_buffer, host_vec.data(), dst_device_ptr, allocation_size, 1, |
| 130 | + &sync_point_2, 0, nullptr, nullptr, nullptr, nullptr)); |
| 131 | + |
| 132 | + ASSERT_SUCCESS(urCommandBufferFinalizeExp(command_buffer)); |
| 133 | + |
| 134 | + ASSERT_SUCCESS( |
| 135 | + urEnqueueCommandBufferExp(queue, command_buffer, 0, nullptr, nullptr)); |
| 136 | + |
| 137 | + urQueueFinish(queue); |
| 138 | + for (auto &i : host_vec) { |
| 139 | + ASSERT_EQ(i, val); |
| 140 | + } |
| 141 | +} |