sycl-rel_5_2_0: [Bindless][Exp][NFC] Remove Unnecessary 3D Array Image Helpers (#13022) #13470

Closed: wants to merge 18 commits

Commits (18)
22e9785: [UR][L0] Support for urUsmP2PPeerAccessGetInfoExp to query p2p access… (nrspruit, Mar 18, 2024)
fa53fea: [CUDA][LIBCLC] Implement RC11 seq_cst for PTX6.0 (#12516) (JackAKirk, Mar 18, 2024)
0326cdc: [UR] Add urProgramGetGlobalVariablePointer entrypoint (#12496) (fabiomestre, Mar 19, 2024)
a486c12: [SYCL][Graph][UR] Update UR to support updating kernel commands in co… (againull, Mar 19, 2024)
0838aba: [UR] CI for UR PR refactor-guess-local-worksize (#12663) (Mar 20, 2024)
257ac92: [SYCL][Graph][HIP] Set minimum ROCm version for graphs (#13035) (EwanC, Mar 21, 2024)
42919a9: [UR][L0] Fix Native Host memory usage on device with copy back sync (… (nrspruit, Mar 21, 2024)
1ba64e4: [UR][L0] Enable default support for L0 in-order lists (#13033) (raiyanla, Mar 22, 2024)
455c764: [SYCL][Graph][L0] Test Coverity fix (#13075) (EwanC, Mar 25, 2024)
1de8dbe: [UR][L0] fix a deadlock on a recursive event rwlock (#13112) (pbalcer, Mar 27, 2024)
4c54bfe: [UR] Refactor Device Initialisation (#12762) (Mar 28, 2024)
6fd40bb: [UR] Pull in UR changes to add exec error status to events. (#13127) (aarongreig, Apr 1, 2024)
30c1495: [UR] Remove unused function prototypes (#13072) (Apr 1, 2024)
a669736: [UR] Add DEVICE_NOT_AVAILABLE UR error code and PI translation for sa… (aarongreig, Apr 2, 2024)
4627abf: [UR][CL] Atomic order memory capability for Intel FPGA driver (#13041) (kbenzie, Apr 5, 2024)
40c2781: [UR][L0] Fix DeviceInfo global mem free to report unsupported given M… (nrspruit, Apr 8, 2024)
1985ba5: [SYCL][PI] Add PI_ERROR_UNSUPPORTED_FEATURE error code (#13036) (steffenlarsen, Mar 20, 2024)
e9ec31d: [Bindless][Exp][NFC] Remove Unnecessary 3D Array Image Helpers (#13022) (isaacault, Mar 18, 2024)
7 changes: 7 additions & 0 deletions libclc/ptx-nvidiacl/libspirv/atomic/atomic_add.cl
@@ -71,6 +71,13 @@ Memory order is stored in the lowest 5 bits */
ADDR_SPACE, ADDR_SPACE_NV) \
} \
break; \
case SequentiallyConsistent: \
if (__clc_nvvm_reflect_arch() >= 700) { \
__CLC_NVVM_FENCE_SC_SM70() \
__CLC_NVVM_ATOMIC_IMPL_ORDER(double, double, d, add, ADDR_SPACE, \
ADDR_SPACE_NV, _acq_rel) \
break; \
} \
} \
__builtin_trap(); \
__builtin_unreachable(); \
8 changes: 8 additions & 0 deletions libclc/ptx-nvidiacl/libspirv/atomic/atomic_cmpxchg.cl
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//

#include <atomic_helpers.h>
#include <spirv/spirv.h>
#include <spirv/spirv_types.h>

@@ -120,6 +121,13 @@ Memory order is stored in the lowest 5 bits */ \
TYPE, TYPE_NV, TYPE_MANGLED_NV, OP, ADDR_SPACE, ADDR_SPACE_NV) \
} \
break; \
case SequentiallyConsistent: \
if (__clc_nvvm_reflect_arch() >= 700) { \
__CLC_NVVM_FENCE_SC_SM70() \
__CLC_NVVM_ATOMIC_CAS_IMPL_ORDER(TYPE, TYPE_NV, TYPE_MANGLED_NV, OP, \
ADDR_SPACE, ADDR_SPACE_NV, _acq_rel) \
break; \
} \
} \
__builtin_trap(); \
__builtin_unreachable(); \
16 changes: 16 additions & 0 deletions libclc/ptx-nvidiacl/libspirv/atomic/atomic_helpers.h
@@ -72,6 +72,15 @@ _CLC_OVERLOAD _CLC_DECL void __spirv_MemoryBarrier(unsigned int, unsigned int);
} \
}

#define __CLC_NVVM_FENCE_SC_SM70() \
if (scope == CrossDevice) { \
__asm__ __volatile__("fence.sc.sys;"); \
} else if (scope == Device) { \
__asm__ __volatile__("fence.sc.gpu;"); \
} else { \
__asm__ __volatile__("fence.sc.cta;"); \
}

#define __CLC_NVVM_ATOMIC_IMPL( \
TYPE, TYPE_MANGLED, TYPE_NV, TYPE_MANGLED_NV, OP, NAME_MANGLED, \
ADDR_SPACE, POINTER_AND_ADDR_SPACE_MANGLED, ADDR_SPACE_NV, SUBSTITUTION) \
@@ -117,6 +126,13 @@ Memory order is stored in the lowest 5 bits */ \
OP, ADDR_SPACE, ADDR_SPACE_NV) \
} \
break; \
case SequentiallyConsistent: \
if (__clc_nvvm_reflect_arch() >= 700) { \
__CLC_NVVM_FENCE_SC_SM70() \
__CLC_NVVM_ATOMIC_IMPL_ORDER(TYPE, TYPE_NV, TYPE_MANGLED_NV, OP, \
ADDR_SPACE, ADDR_SPACE_NV, _acq_rel) \
break; \
} \
} \
__builtin_trap(); \
__builtin_unreachable(); \
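
The new `__CLC_NVVM_FENCE_SC_SM70` helper implements the standard PTX 6.0 recipe for sequential consistency on sm_70 and newer: issue a `fence.sc` at the requested memory scope, then perform the atomic operation with acquire-release semantics. A minimal standalone sketch of that recipe in CUDA C++ with inline PTX (device scope, generic address space; the function name is illustrative, not part of libclc):

```cpp
// Sketch of the seq_cst lowering used above: fence.sc at the chosen
// scope, followed by an acq_rel read-modify-write (requires sm_70+).
__device__ int seq_cst_fetch_add_device_scope(int *p, int v) {
  int old;
  // Sequentially consistent fence at device (.gpu) scope.
  asm volatile("fence.sc.gpu;" ::: "memory");
  // After the fence, the RMW itself only needs acq_rel ordering.
  asm volatile("atom.acq_rel.gpu.add.s32 %0, [%1], %2;"
               : "=r"(old)
               : "l"(p), "r"(v)
               : "memory");
  return old;
}
```

On pre-sm_70 devices the `__clc_nvvm_reflect_arch() >= 700` guard in the RMW macros above fails, the switch falls through, and the code traps, since the older PTX memory model has no scoped `fence.sc`.
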
7 changes: 7 additions & 0 deletions libclc/ptx-nvidiacl/libspirv/atomic/atomic_load.cl
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//

#include <atomic_helpers.h>
#include <spirv/spirv.h>
#include <spirv/spirv_types.h>

@@ -53,6 +54,12 @@ Memory order is stored in the lowest 5 bits */ \
case Acquire: \
__CLC_NVVM_ATOMIC_LOAD_IMPL_ORDER(TYPE, TYPE_NV, TYPE_MANGLED_NV, \
ADDR_SPACE, ADDR_SPACE_NV, _acquire) \
break; \
case SequentiallyConsistent: \
__CLC_NVVM_FENCE_SC_SM70() \
__CLC_NVVM_ATOMIC_LOAD_IMPL_ORDER(TYPE, TYPE_NV, TYPE_MANGLED_NV, \
ADDR_SPACE, ADDR_SPACE_NV, _acquire) \
break; \
} \
} else { \
TYPE_NV res = __nvvm_volatile_ld##ADDR_SPACE_NV##TYPE_MANGLED_NV( \
8 changes: 8 additions & 0 deletions libclc/ptx-nvidiacl/libspirv/atomic/atomic_store.cl
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//

#include <atomic_helpers.h>
#include <spirv/spirv.h>
#include <spirv/spirv_types.h>

@@ -54,6 +55,13 @@ Memory order is stored in the lowest 5 bits */ \
__CLC_NVVM_ATOMIC_STORE_IMPL_ORDER(TYPE, TYPE_NV, TYPE_MANGLED_NV, \
ADDR_SPACE, ADDR_SPACE_NV, \
_release) \
break; \
case SequentiallyConsistent: \
__CLC_NVVM_FENCE_SC_SM70() \
__CLC_NVVM_ATOMIC_STORE_IMPL_ORDER(TYPE, TYPE_NV, TYPE_MANGLED_NV, \
ADDR_SPACE, ADDR_SPACE_NV, \
_release) \
break; \
} \
} else { \
switch (order) { \
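
At the SYCL level these libclc builtins back `sycl::atomic_ref` operations that request `memory_order::seq_cst` on the CUDA backend. A hypothetical kernel showing the source construct that reaches this lowering (illustrative; assumes a device of sm_70 or newer that reports seq_cst among its atomic memory-order capabilities):

```cpp
#include <sycl/sycl.hpp>

int main() {
  sycl::queue q;
  int *counter = sycl::malloc_shared<int>(1, q);
  *counter = 0;
  q.parallel_for(sycl::range<1>{128}, [=](sycl::id<1>) {
     // On PTX 6.0 this seq_cst RMW lowers to the fence.sc + acq_rel
     // sequence added in this change.
     sycl::atomic_ref<int, sycl::memory_order::seq_cst,
                      sycl::memory_scope::device,
                      sycl::access::address_space::global_space>
         ref(*counter);
     ref.fetch_add(1);
   }).wait();
  sycl::free(counter, q);
  return 0;
}
```
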
63 changes: 8 additions & 55 deletions libclc/ptx-nvidiacl/libspirv/images/image_helpers.ll
@@ -500,19 +500,19 @@ entry:
;
; @llvm.nvvm.suld.<NDims>.array.v<NChannels><DType>.clamp
;
-; <NDims> = { 1d, 2d, 3d }
+; <NDims> = { 1d, 2d }
; <NChannels> = { 2, 4 }
; <Dtype> = { i8, i16, i32 }
;
-; Note: The case of NChannels=1 doesn't need to be handled here as it can be
+; Note: The case of NChannels=1 does not need to be handled here as it can be
; called directly.


; @llvm.nvvm.suld.<NDims>.array.v<NChannels>{i8, i16, i32}.clamp

; - @llvm.nvvm.suld.<NDims>.array.v{2, 4}i8.clamp

-; - - @llvm.nvvm.suld.{1d, 2d, 3d}.array.v2i8.clamp
+; - - @llvm.nvvm.suld.{1d, 2d}.array.v2i8.clamp

declare {i16,i16} @llvm.nvvm.suld.1d.array.v2i8.clamp(i64, i32, i32)
define <2 x i16> @__clc_llvm_nvvm_suld_1d_array_v2i8_clamp(i64 %img, i32 %idx, i32 %x) nounwind alwaysinline {
@@ -530,15 +530,7 @@
ret <2 x i16> %1
}

-declare {i16,i16} @llvm.nvvm.suld.3d.array.v2i8.clamp(i64, i32, i32, i32, i32)
-define <2 x i16> @__clc_llvm_nvvm_suld_3d_array_v2i8_clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z) nounwind alwaysinline {
-entry:
-%0 = tail call {i16,i16} @llvm.nvvm.suld.3d.array.v2i8.clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z);
-%1 = tail call <2 x i16> @__clc_struct16_to_vector2({i16,i16} %0)
-ret <2 x i16> %1
-}

-; - - @llvm.nvvm.suld.{1d, 2d, 3d}.array.v4i8.clamp
+; - - @llvm.nvvm.suld.{1d, 2d}.array.v4i8.clamp

declare {i16,i16,i16,i16} @llvm.nvvm.suld.1d.array.v4i8.clamp(i64, i32, i32)
define <4 x i16> @__clc_llvm_nvvm_suld_1d_array_v4i8_clamp(i64 %img, i32 %idx, i32 %x) nounwind alwaysinline {
@@ -556,17 +548,9 @@
ret <4 x i16> %1
}

-declare {i16,i16,i16,i16} @llvm.nvvm.suld.3d.array.v4i8.clamp(i64, i32, i32, i32, i32)
-define <4 x i16> @__clc_llvm_nvvm_suld_3d_array_v4i8_clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z) nounwind alwaysinline {
-entry:
-%0 = tail call {i16,i16,i16,i16} @llvm.nvvm.suld.3d.array.v4i8.clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z);
-%1 = tail call <4 x i16> @__clc_struct16_to_vector({i16,i16,i16,i16} %0)
-ret <4 x i16> %1
-}

; - @llvm.nvvm.suld.<NDims>.array.v{2, 4}i16.clamp

-; - - @llvm.nvvm.suld.{1d, 2d, 3d}.array.v2i16.clamp
+; - - @llvm.nvvm.suld.{1d, 2d}.array.v2i16.clamp

declare {i16,i16} @llvm.nvvm.suld.1d.array.v2i16.clamp(i64, i32, i32)
define <2 x i16> @__clc_llvm_nvvm_suld_1d_array_v2i16_clamp(i64 %img, i32 %idx, i32 %x) nounwind alwaysinline {
@@ -584,15 +568,7 @@
ret <2 x i16> %1
}

-declare {i16,i16} @llvm.nvvm.suld.3d.array.v2i16.clamp(i64, i32, i32, i32, i32)
-define <2 x i16> @__clc_llvm_nvvm_suld_3d_array_v2i16_clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z) nounwind alwaysinline {
-entry:
-%0 = tail call {i16,i16} @llvm.nvvm.suld.3d.array.v2i16.clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z);
-%1 = tail call <2 x i16> @__clc_struct16_to_vector2({i16,i16} %0)
-ret <2 x i16> %1
-}

-; - - @llvm.nvvm.suld.{1d, 2d, 3d}.array.v4i16.clamp
+; - - @llvm.nvvm.suld.{1d, 2d}.array.v4i16.clamp

declare {i16,i16,i16,i16} @llvm.nvvm.suld.1d.array.v4i16.clamp(i64, i32, i32)
define <4 x i16> @__clc_llvm_nvvm_suld_1d_array_v4i16_clamp(i64 %img, i32 %idx, i32 %x) nounwind alwaysinline {
@@ -610,17 +586,9 @@
ret <4 x i16> %1
}

-declare {i16,i16,i16,i16} @llvm.nvvm.suld.3d.array.v4i16.clamp(i64, i32, i32, i32, i32)
-define <4 x i16> @__clc_llvm_nvvm_suld_3d_array_v4i16_clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z) nounwind alwaysinline {
-entry:
-%0 = tail call {i16,i16,i16,i16} @llvm.nvvm.suld.3d.array.v4i16.clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z);
-%1 = tail call <4 x i16> @__clc_struct16_to_vector({i16,i16,i16,i16} %0)
-ret <4 x i16> %1
-}

; - @llvm.nvvm.suld.<NDims>.array.v{2, 4}i32.clamp

-; - - @llvm.nvvm.suld.{1d, 2d, 3d}.array.v2i32.clamp
+; - - @llvm.nvvm.suld.{1d, 2d}.array.v2i32.clamp

declare {i32,i32} @llvm.nvvm.suld.1d.array.v2i32.clamp(i64, i32, i32)
define <2 x i32> @__clc_llvm_nvvm_suld_1d_array_v2i32_clamp(i64 %img, i32 %idx, i32 %x) nounwind alwaysinline {
@@ -638,17 +606,9 @@
ret <2 x i32> %1
}

-declare {i32,i32} @llvm.nvvm.suld.3d.array.v2i32.clamp(i64, i32, i32, i32, i32)
-define <2 x i32> @__clc_llvm_nvvm_suld_3d_array_v2i32_clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z) nounwind alwaysinline {
-entry:
-%0 = tail call {i32,i32} @llvm.nvvm.suld.3d.array.v2i32.clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z);
-%1 = tail call <2 x i32> @__clc_struct32_to_vector2({i32,i32} %0)
-ret <2 x i32> %1
-}

; - @llvm.nvvm.suld.<NDims>.array.v4i32.clamp

-; - - @llvm.nvvm.suld.{1d, 2d, 3d}.array.v4i32.clamp
+; - - @llvm.nvvm.suld.{1d, 2d}.array.v4i32.clamp

declare {i32,i32,i32,i32} @llvm.nvvm.suld.1d.array.v4i32.clamp(i64, i32, i32)
define <4 x i32> @__clc_llvm_nvvm_suld_1d_array_v4i32_clamp(i64 %img, i32 %idx, i32 %x) nounwind alwaysinline {
@@ -665,10 +625,3 @@ entry:
%1 = tail call <4 x i32> @__clc_struct32_to_vector({i32,i32,i32,i32} %0)
ret <4 x i32> %1
}

-declare {i32,i32,i32,i32} @llvm.nvvm.suld.3d.array.v4i32.clamp(i64, i32, i32, i32, i32)
-define <4 x i32> @__clc_llvm_nvvm_suld_3d_array_v4i32_clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z) nounwind alwaysinline {
-entry:
-%0 = tail call {i32,i32,i32,i32} @llvm.nvvm.suld.3d.array.v4i32.clamp(i64 %img, i32 %idx, i32 %x, i32 %y, i32 %z);
-%1 = tail call <4 x i32> @__clc_struct32_to_vector({i32,i32,i32,i32} %0)
-ret <4 x i32> %1
-}
4 changes: 3 additions & 1 deletion sycl/doc/design/CommandGraph.md
@@ -405,8 +405,10 @@ The HIP backend offers a Graph management API very similar to CUDA Graph
feature for batching series of operations.
The SYCL Graph HIP backend implementation is therefore very similar to that of CUDA.

The minimum version of ROCm required to support `sycl_ext_oneapi_graph` is 5.5.1.

UR commands (e.g. kernels) are mapped as graph nodes using the
-[HIP Management API](https://docs.amd.com/projects/HIP/en/docs-5.5.0/doxygen/html/group___graph.html).
+[HIP Management API](https://rocm.docs.amd.com/projects/HIP/en/docs-5.5.1/doxygen/html/group___graph.html).
Synchronization between commands (UR sync-points) is implemented
using graph dependencies.
Executable HIP Graphs can be submitted to a HIP stream
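
For reference, the HIP graph workflow this backend builds on looks roughly as follows. This is a minimal stream-capture sketch against the public HIP API (illustrative only; the UR implementation maps commands to nodes directly through the graph management API rather than via capture):

```cpp
#include <hip/hip_runtime.h>

// Record work into a HIP graph, then instantiate and launch the
// executable graph. Error checking elided for brevity.
void run_captured_graph(hipStream_t stream) {
  hipGraph_t graph = nullptr;
  hipGraphExec_t exec = nullptr;

  hipStreamBeginCapture(stream, hipStreamCaptureModeGlobal);
  // ... enqueue kernels / copies on `stream`; each becomes a graph node ...
  hipStreamEndCapture(stream, &graph);

  hipGraphInstantiate(&exec, graph, nullptr, nullptr, 0);
  hipGraphLaunch(exec, stream); // the whole graph is submitted in one call
  hipStreamSynchronize(stream);

  hipGraphExecDestroy(exec);
  hipGraphDestroy(graph);
}
```
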
1 change: 1 addition & 0 deletions sycl/include/sycl/detail/pi.def
@@ -31,6 +31,7 @@ _PI_API(piDeviceRetain)
_PI_API(piDeviceRelease)
_PI_API(piextDeviceSelectBinary)
_PI_API(piextGetDeviceFunctionPointer)
_PI_API(piextGetGlobalVariablePointer)
_PI_API(piextDeviceGetNativeHandle)
_PI_API(piextDeviceCreateWithNativeHandle)
// Context
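
`pi.def` is an X-macro table: a consumer defines `_PI_API` to whatever construct it needs and then includes the file, stamping out one entry per entrypoint, which is why adding `piextGetGlobalVariablePointer` is a single line here. A schematic consumer (illustrative only, not the runtime's actual table builder):

```cpp
// Generate an array of entrypoint names from the pi.def X-macro table.
#define _PI_API(api) #api,
static const char *PiApiNames[] = {
#include <sycl/detail/pi.def>
};
#undef _PI_API
```
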
8 changes: 7 additions & 1 deletion sycl/include/sycl/detail/pi.h
@@ -154,9 +154,11 @@
// 15.44 Add coarse-grain memory advice flag for HIP.
// 15.45 Added piextKernelSuggestMaxCooperativeGroupCount and
// piextEnqueueCooperativeKernelLaunch.
// 15.46 Add piextGetGlobalVariablePointer
// 15.47 Added PI_ERROR_FEATURE_UNSUPPORTED.

#define _PI_H_VERSION_MAJOR 15
-#define _PI_H_VERSION_MINOR 45
+#define _PI_H_VERSION_MINOR 47

#define _PI_STRING_HELPER(a) #a
#define _PI_CONCAT(a, b) _PI_STRING_HELPER(a.b)
@@ -1287,6 +1289,10 @@ __SYCL_EXPORT pi_result piextGetDeviceFunctionPointer(
pi_device device, pi_program program, const char *function_name,
pi_uint64 *function_pointer_ret);

__SYCL_EXPORT pi_result piextGetGlobalVariablePointer(
pi_device Device, pi_program Program, const char *GlobalVariableName,
size_t *GlobalVariableSize, void **GlobalVariablePointerRet);

//
// Context
//
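
A sketch of the intended call sequence for the new entrypoint (hypothetical wrapper; names and error handling are illustrative): given a device, a built program, and the name of a device-global variable, it returns the variable's size and its device pointer, which can then be used with USM memory operations.

```cpp
#include <sycl/detail/pi.h>

// Hypothetical helper: look up the device pointer of a named
// device-global variable in a built program.
pi_result get_global_pointer(pi_device Dev, pi_program Prog,
                             const char *Name, void **PtrOut) {
  size_t Size = 0;
  pi_result Res =
      piextGetGlobalVariablePointer(Dev, Prog, Name, &Size, PtrOut);
  if (Res != PI_SUCCESS)
    return Res; // e.g. no variable with that name in the program
  // *PtrOut now addresses Size bytes of device memory backing the global.
  return PI_SUCCESS;
}
```
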
3 changes: 3 additions & 0 deletions sycl/include/sycl/detail/pi_error.def
@@ -101,6 +101,9 @@ _PI_ERRC(PI_ERROR_VA_API_MEDIA_SURFACE_NOT_ACQUIRED_INTEL, -1101)
_PI_ERRC(PI_ERROR_UNINITIALIZED, -1102)

// PI specific error codes
// PI_ERROR_UNSUPPORTED_FEATURE indicates that the backend or the corresponding
// device does not support the feature.
_PI_ERRC_WITH_MSG(PI_ERROR_UNSUPPORTED_FEATURE, -995, "The plugin or device does not support the called function")
// PI_ERROR_PLUGIN_SPECIFIC_ERROR indicates that a backend specific error or
// warning has been emitted by the plugin.
_PI_ERRC_WITH_MSG(PI_ERROR_PLUGIN_SPECIFIC_ERROR, -996, "The plugin has emitted a backend specific error")
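
The distinct code lets callers separate "this backend does not implement the feature" from genuine failures; an illustrative check (the fallback policy shown is an assumption, not something this patch prescribes):

```cpp
#include <stdexcept>
#include <sycl/detail/pi.h>

// Illustrative handling of the new error code around an optional
// entrypoint such as piextEnqueueCooperativeKernelLaunch.
bool feature_available(pi_result Res) {
  if (Res == PI_ERROR_UNSUPPORTED_FEATURE)
    return false; // expected on backends lacking the feature; fall back
  if (Res != PI_SUCCESS)
    throw std::runtime_error("PI call failed"); // genuine error
  return true;
}
```
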
9 changes: 9 additions & 0 deletions sycl/plugins/cuda/pi_cuda.cpp
@@ -848,6 +848,15 @@ pi_result piextGetDeviceFunctionPointer(pi_device Device, pi_program Program,
FunctionPointerRet);
}

pi_result piextGetGlobalVariablePointer(pi_device Device, pi_program Program,
const char *GlobalVariableName,
size_t *GlobalVariableSize,
void **GlobalVariablePointerRet) {
return pi2ur::piextGetGlobalVariablePointer(
Device, Program, GlobalVariableName, GlobalVariableSize,
GlobalVariablePointerRet);
}

pi_result piextUSMDeviceAlloc(void **ResultPtr, pi_context Context,
pi_device Device,
pi_usm_mem_properties *Properties, size_t Size,
9 changes: 9 additions & 0 deletions sycl/plugins/hip/pi_hip.cpp
@@ -851,6 +851,15 @@ pi_result piextGetDeviceFunctionPointer(pi_device Device, pi_program Program,
FunctionPointerRet);
}

pi_result piextGetGlobalVariablePointer(pi_device Device, pi_program Program,
const char *GlobalVariableName,
size_t *GlobalVariableSize,
void **GlobalVariablePointerRet) {
return pi2ur::piextGetGlobalVariablePointer(
Device, Program, GlobalVariableName, GlobalVariableSize,
GlobalVariablePointerRet);
}

pi_result piextUSMDeviceAlloc(void **ResultPtr, pi_context Context,
pi_device Device,
pi_usm_mem_properties *Properties, size_t Size,
9 changes: 9 additions & 0 deletions sycl/plugins/level_zero/pi_level_zero.cpp
@@ -875,6 +875,15 @@ pi_result piextGetDeviceFunctionPointer(pi_device Device, pi_program Program,
FunctionPointerRet);
}

pi_result piextGetGlobalVariablePointer(pi_device Device, pi_program Program,
const char *GlobalVariableName,
size_t *GlobalVariableSize,
void **GlobalVariablePointerRet) {
return pi2ur::piextGetGlobalVariablePointer(
Device, Program, GlobalVariableName, GlobalVariableSize,
GlobalVariablePointerRet);
}

pi_result piextUSMDeviceAlloc(void **ResultPtr, pi_context Context,
pi_device Device,
pi_usm_mem_properties *Properties, size_t Size,
13 changes: 11 additions & 2 deletions sycl/plugins/native_cpu/pi_native_cpu.cpp
@@ -852,6 +852,15 @@ pi_result piextGetDeviceFunctionPointer(pi_device Device, pi_program Program,
FunctionPointerRet);
}

pi_result piextGetGlobalVariablePointer(pi_device Device, pi_program Program,
const char *GlobalVariableName,
size_t *GlobalVariableSize,
void **GlobalVariablePointerRet) {
return pi2ur::piextGetGlobalVariablePointer(
Device, Program, GlobalVariableName, GlobalVariableSize,
GlobalVariablePointerRet);
}

pi_result piextUSMDeviceAlloc(void **ResultPtr, pi_context Context,
pi_device Device,
pi_usm_mem_properties *Properties, size_t Size,
@@ -1257,13 +1266,13 @@ pi_result piextEnqueueCooperativeKernelLaunch(
const size_t *, const size_t *,
const size_t *, pi_uint32 ,
const pi_event *, pi_event *) {
-return PI_ERROR_INVALID_OPERATION;
+return PI_ERROR_UNSUPPORTED_FEATURE;
}

pi_result piextKernelSuggestMaxCooperativeGroupCount(
pi_kernel , size_t , size_t ,
pi_uint32 *) {
-return PI_ERROR_INVALID_OPERATION;
+return PI_ERROR_UNSUPPORTED_FEATURE;
}

// Initialize function table with stubs.