[HLSL] add extra scalar vector overloads for clamp #129939

Merged · 16 commits · Mar 17, 2025

1 change: 1 addition & 0 deletions clang/lib/Headers/CMakeLists.txt
@@ -87,6 +87,7 @@ set(hlsl_h
set(hlsl_subdir_files
hlsl/hlsl_basic_types.h
hlsl/hlsl_alias_intrinsics.h
hlsl/hlsl_compat_overloads.h
hlsl/hlsl_intrinsic_helpers.h
hlsl/hlsl_intrinsics.h
hlsl/hlsl_detail.h
1 change: 1 addition & 0 deletions clang/lib/Headers/hlsl.h
@@ -22,6 +22,7 @@

// HLSL standard library function declarations/definitions.
#include "hlsl/hlsl_alias_intrinsics.h"
#include "hlsl/hlsl_compat_overloads.h"
#include "hlsl/hlsl_intrinsics.h"

#if defined(__clang__)
58 changes: 58 additions & 0 deletions clang/lib/Headers/hlsl/hlsl_compat_overloads.h
@@ -0,0 +1,58 @@
//===--- hlsl_compat_overloads.h - Extra HLSL overloads for intrinsics --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _HLSL_COMPAT_OVERLOADS_H_
#define _HLSL_COMPAT_OVERLOADS_H_

namespace hlsl {

// Note: Functions in this file are sorted alphabetically, then grouped by base
// element type, and the element types are sorted by size, then signed integer,
// unsigned integer and floating point. Keeping this ordering consistent will
// help keep this file manageable as it grows.

//===----------------------------------------------------------------------===//
// clamp builtins overloads
//===----------------------------------------------------------------------===//

template <typename T, typename R, typename U, uint N>
constexpr __detail::enable_if_t<
__detail::is_arithmetic<U>::Value && (N > 1 && N <= 4), vector<T, N>>
clamp(vector<T, N> p0, vector<R, N> p1, U p2) {
return clamp(p0, (vector<T, N>)p1, (vector<T, N>)p2);
}
template <typename T, typename R, typename U, uint N>
constexpr __detail::enable_if_t<
__detail::is_arithmetic<U>::Value && (N > 1 && N <= 4), vector<T, N>>
clamp(vector<T, N> p0, U p1, vector<R, N> p2) {
return clamp(p0, (vector<T, N>)p1, (vector<T, N>)p2);
}
template <typename T, typename U, typename V, uint N>
constexpr __detail::enable_if_t<__detail::is_arithmetic<U>::Value &&
__detail::is_arithmetic<V>::Value &&
(N > 1 && N <= 4),
vector<T, N>>
clamp(vector<T, N> p0, U p1, V p2) {
return clamp(p0, (vector<T, N>)p1, (vector<T, N>)p2);
}
template <typename T, typename R, typename S, uint N>
constexpr __detail::enable_if_t<(N > 1 && N <= 4), vector<T, N>>
clamp(vector<T, N> p0, vector<R, N> p1, vector<S, N> p2) {
return clamp(p0, (vector<T, N>)p1, (vector<T, N>)p2);
}
template <typename U, typename V, typename W>
constexpr __detail::enable_if_t<__detail::is_arithmetic<U>::Value &&
__detail::is_arithmetic<V>::Value &&
__detail::is_arithmetic<W>::Value,
U>
clamp(U p0, V p1, W p2) {
return clamp(p0, (U)p1, (U)p2);
}

} // namespace hlsl
#endif // _HLSL_COMPAT_OVERLOADS_H_
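
Editorial note: as a rough sketch of how these overloads resolve a mixed-type call (the function name below is illustrative, not part of the patch), the element type and width of the first argument win and the other arguments are cast to match before the existing clamp overload is invoked. This mirrors test_overloads1 in the CodeGen test further down, which checks for a call to @llvm.[[TARGET]].sclamp.v2i32:

// Illustrative HLSL only: p1 and p2 are cast to int2 (the type of p0) by the
// clamp(vector<T,N>, vector<R,N>, U) overload above, so the result is an
// element-wise signed integer clamp.
int2 example_mixed_clamp(int2 p0, float2 p1, uint p2) {
  return clamp(p0, p1, p2);
}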
20 changes: 19 additions & 1 deletion clang/lib/Sema/SemaHLSL.cpp
@@ -2041,6 +2041,23 @@ static bool CheckVectorElementCallArgs(Sema *S, CallExpr *TheCall) {
return false;
}

static bool CheckAllArgsHaveSameType(Sema *S, CallExpr *TheCall) {
assert(TheCall->getNumArgs() > 1);
QualType ArgTy0 = TheCall->getArg(0)->getType();

for (unsigned I = 1, N = TheCall->getNumArgs(); I < N; ++I) {
if (!S->getASTContext().hasSameUnqualifiedType(
ArgTy0, TheCall->getArg(I)->getType())) {
S->Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_incompatible_vector)
<< TheCall->getDirectCallee() << /*useAllTerminology*/ true
<< SourceRange(TheCall->getArg(0)->getBeginLoc(),
TheCall->getArg(N - 1)->getEndLoc());
return true;
}
}
return false;
}

static bool CheckArgTypeMatches(Sema *S, Expr *Arg, QualType ExpectedType) {
QualType ArgType = Arg->getType();
if (!S->getASTContext().hasSameUnqualifiedType(ArgType, ExpectedType)) {
@@ -2392,7 +2409,8 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Builtin::BI__builtin_hlsl_elementwise_clamp: {
if (SemaRef.checkArgCount(TheCall, 3))
return true;
if (CheckVectorElementCallArgs(&SemaRef, TheCall))
if (CheckAnyScalarOrVector(&SemaRef, TheCall, 0) ||
CheckAllArgsHaveSameType(&SemaRef, TheCall))
return true;
if (SemaRef.BuiltinElementwiseTernaryMath(
TheCall, /*CheckForFloatArgs*/
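
Editorial note: to show the user-visible effect of the new CheckAllArgsHaveSameType check (function names below are hypothetical, mirroring the tests that follow), the raw builtin now insists on identical argument types, while hlsl::clamp still accepts mixed types because the compat overloads above insert the casts first:

// Accepted (same shape as test_overloads3): the compat overload casts p1 and
// p2 to uint3 before the builtin is reached.
uint3 still_ok(uint3 p0, int p1, float p2) {
  return clamp(p0, p1, p2);
}

// Rejected (same shape as test_clamp_float2_int_splat) with the diagnostic
// "all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type".
float2 now_errors(float2 p0, int p1) {
  return __builtin_hlsl_elementwise_clamp(p0, p1, p1);
}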
52 changes: 52 additions & 0 deletions clang/test/CodeGenHLSL/builtins/clamp.hlsl
@@ -28,6 +28,9 @@ int16_t3 test_clamp_short3(int16_t3 p0, int16_t3 p1) { return clamp(p0, p1,p1); }
// NATIVE_HALF: define [[FNATTRS]] <4 x i16> @_Z17test_clamp_short4
// NATIVE_HALF: call <4 x i16> @llvm.[[TARGET]].sclamp.v4i16
int16_t4 test_clamp_short4(int16_t4 p0, int16_t4 p1) { return clamp(p0, p1,p1); }
// NATIVE_HALF: define [[FNATTRS]] <4 x i16> {{.*}}test_clamp_short4_mismatch
// NATIVE_HALF: call <4 x i16> @llvm.[[TARGET]].sclamp.v4i16
int16_t4 test_clamp_short4_mismatch(int16_t4 p0, int16_t p1) { return clamp(p0, p0,p1); }

// NATIVE_HALF: define [[FNATTRS]] i16 @_Z17test_clamp_ushort
// NATIVE_HALF: call i16 @llvm.[[TARGET]].uclamp.i16(
@@ -41,6 +44,9 @@ uint16_t3 test_clamp_ushort3(uint16_t3 p0, uint16_t3 p1) { return clamp(p0, p1,p1); }
// NATIVE_HALF: define [[FNATTRS]] <4 x i16> @_Z18test_clamp_ushort4
// NATIVE_HALF: call <4 x i16> @llvm.[[TARGET]].uclamp.v4i16
uint16_t4 test_clamp_ushort4(uint16_t4 p0, uint16_t4 p1) { return clamp(p0, p1,p1); }
// NATIVE_HALF: define [[FNATTRS]] <4 x i16> {{.*}}test_clamp_ushort4_mismatch
// NATIVE_HALF: call <4 x i16> @llvm.[[TARGET]].uclamp.v4i16
uint16_t4 test_clamp_ushort4_mismatch(uint16_t4 p0, uint16_t p1) { return clamp(p0, p0,p1); }
#endif

// CHECK: define [[FNATTRS]] i32 @_Z14test_clamp_int
@@ -55,6 +61,9 @@ int3 test_clamp_int3(int3 p0, int3 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] <4 x i32> @_Z15test_clamp_int4
// CHECK: call <4 x i32> @llvm.[[TARGET]].sclamp.v4i32
int4 test_clamp_int4(int4 p0, int4 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] <4 x i32> {{.*}}test_clamp_int4_mismatch
// CHECK: call <4 x i32> @llvm.[[TARGET]].sclamp.v4i32
int4 test_clamp_int4_mismatch(int4 p0, int p1) { return clamp(p0, p0,p1); }

// CHECK: define [[FNATTRS]] i32 @_Z15test_clamp_uint
// CHECK: call i32 @llvm.[[TARGET]].uclamp.i32(
@@ -68,6 +77,9 @@ uint3 test_clamp_uint3(uint3 p0, uint3 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] <4 x i32> @_Z16test_clamp_uint4
// CHECK: call <4 x i32> @llvm.[[TARGET]].uclamp.v4i32
uint4 test_clamp_uint4(uint4 p0, uint4 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] <4 x i32> {{.*}}test_clamp_uint4_mismatch
// CHECK: call <4 x i32> @llvm.[[TARGET]].uclamp.v4i32
uint4 test_clamp_uint4_mismatch(uint4 p0, uint p1) { return clamp(p0, p0,p1); }

// CHECK: define [[FNATTRS]] i64 @_Z15test_clamp_long
// CHECK: call i64 @llvm.[[TARGET]].sclamp.i64(
@@ -81,6 +93,9 @@ int64_t3 test_clamp_long3(int64_t3 p0, int64_t3 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] <4 x i64> @_Z16test_clamp_long4
// CHECK: call <4 x i64> @llvm.[[TARGET]].sclamp.v4i64
int64_t4 test_clamp_long4(int64_t4 p0, int64_t4 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] <4 x i64> {{.*}}test_clamp_long4_mismatch
// CHECK: call <4 x i64> @llvm.[[TARGET]].sclamp.v4i64
int64_t4 test_clamp_long4_mismatch(int64_t4 p0, int64_t p1) { return clamp(p0, p0,p1); }

// CHECK: define [[FNATTRS]] i64 @_Z16test_clamp_ulong
// CHECK: call i64 @llvm.[[TARGET]].uclamp.i64(
@@ -94,6 +109,9 @@ uint64_t3 test_clamp_ulong3(uint64_t3 p0, uint64_t3 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] <4 x i64> @_Z17test_clamp_ulong4
// CHECK: call <4 x i64> @llvm.[[TARGET]].uclamp.v4i64
uint64_t4 test_clamp_ulong4(uint64_t4 p0, uint64_t4 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] <4 x i64> {{.*}}test_clamp_ulong4_mismatch
// CHECK: call <4 x i64> @llvm.[[TARGET]].uclamp.v4i64
uint64_t4 test_clamp_ulong4_mismatch(uint64_t4 p0, uint64_t p1) { return clamp(p0, p0,p1); }

// NATIVE_HALF: define [[FNATTRS]] [[FFNATTRS]] half @_Z15test_clamp_half
// NATIVE_HALF: call reassoc nnan ninf nsz arcp afn half @llvm.[[TARGET]].nclamp.f16(
@@ -115,6 +133,11 @@ half3 test_clamp_half3(half3 p0, half3 p1) { return clamp(p0, p1,p1); }
// NO_HALF: define [[FNATTRS]] [[FFNATTRS]] <4 x float> @_Z16test_clamp_half4
// NO_HALF: call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].nclamp.v4f32(
half4 test_clamp_half4(half4 p0, half4 p1) { return clamp(p0, p1,p1); }
// NATIVE_HALF: define [[FNATTRS]] [[FFNATTRS]] <4 x half> {{.*}}test_clamp_half4_mismatch
// NATIVE_HALF: call reassoc nnan ninf nsz arcp afn <4 x half> @llvm.[[TARGET]].nclamp.v4f16
// NO_HALF: define [[FNATTRS]] [[FFNATTRS]] <4 x float> {{.*}}test_clamp_half4_mismatch
// NO_HALF: call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].nclamp.v4f32(
half4 test_clamp_half4_mismatch(half4 p0, half p1) { return clamp(p0, p0,p1); }

// CHECK: define [[FNATTRS]] [[FFNATTRS]] float @_Z16test_clamp_float
// CHECK: call reassoc nnan ninf nsz arcp afn float @llvm.[[TARGET]].nclamp.f32(
@@ -128,6 +151,9 @@ float3 test_clamp_float3(float3 p0, float3 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x float> @_Z17test_clamp_float4
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].nclamp.v4f32
float4 test_clamp_float4(float4 p0, float4 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x float> {{.*}}test_clamp_float4_mismatch
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x float> @llvm.[[TARGET]].nclamp.v4f32
float4 test_clamp_float4_mismatch(float4 p0, float p1) { return clamp(p0, p0,p1); }

// CHECK: define [[FNATTRS]] [[FFNATTRS]] double @_Z17test_clamp_double
// CHECK: call reassoc nnan ninf nsz arcp afn double @llvm.[[TARGET]].nclamp.f64(
@@ -141,3 +167,29 @@ double3 test_clamp_double3(double3 p0, double3 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x double> @_Z18test_clamp_double4
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x double> @llvm.[[TARGET]].nclamp.v4f64
double4 test_clamp_double4(double4 p0, double4 p1) { return clamp(p0, p1,p1); }
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x double> {{.*}}test_clamp_double4_mismatch
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x double> @llvm.[[TARGET]].nclamp.v4f64
double4 test_clamp_double4_mismatch(double4 p0, double p1) { return clamp(p0, p0,p1); }
// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x double> {{.*}}test_clamp_double4_mismatch2
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x double> @llvm.[[TARGET]].nclamp.v4f64
double4 test_clamp_double4_mismatch2(double4 p0, double p1) { return clamp(p0, p1,p0); }

// CHECK: define [[FNATTRS]] <2 x i32> {{.*}}_overloads1
// CHECK: call <2 x i32> @llvm.[[TARGET]].sclamp.v2i32
int2 test_overloads1(int2 p0, float2 p1, uint p2) { return clamp(p0, p1, p2); }

// CHECK: define [[FNATTRS]] [[FFNATTRS]] <2 x float> {{.*}}test_overloads2
// CHECK: call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.[[TARGET]].nclamp.v2f32
float2 test_overloads2(float2 p0, uint p1, int2 p2) { return clamp(p0, p1, p2); }

// CHECK: define [[FNATTRS]] <3 x i32> {{.*}}test_overloads3
// CHECK: call <3 x i32> @llvm.[[TARGET]].uclamp.v3i32
uint3 test_overloads3(uint3 p0, int p1, float p2) { return clamp(p0, p1, p2); }

// CHECK: define [[FNATTRS]] [[FFNATTRS]] <4 x double> {{.*}}test_overloads4
// CHECK: call reassoc nnan ninf nsz arcp afn <4 x double> @llvm.[[TARGET]].nclamp.v4f64
double4 test_overloads4(double4 p0, float4 p1, int4 p2) { return clamp(p0, p1, p2); }

// CHECK: define [[FNATTRS]] i32 {{.*}}test_overloads5
// CHECK: call i32 @llvm.[[TARGET]].sclamp.i32(
int test_overloads5(int p0, uint p1, double p2) { return clamp(p0, p1, p2); }
12 changes: 12 additions & 0 deletions clang/test/SemaHLSL/BuiltIns/clamp-errors-16bit.hlsl
@@ -0,0 +1,12 @@
// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=half
// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=half3
// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=int16_t
// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=int16_t3
// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=uint16_t
// RUN: not %clang_dxc -enable-16bit-types -T cs_6_0 -HV 202x %s 2>&1 | FileCheck %s -DTEST_TYPE=uint16_t3

// Check that we error on 16-bit types if the shader model is too old.
// CHECK: '-enable-16bit-types' option requires target HLSL Version >= 2018 and shader model >= 6.2, but HLSL Version is 'hlsl202x' and shader model is '6.0'
TEST_TYPE test_half_error(TEST_TYPE p0, int p1) {
return clamp(p0, p1, p1);
}
55 changes: 43 additions & 12 deletions clang/test/SemaHLSL/BuiltIns/clamp-errors.hlsl
@@ -1,4 +1,4 @@
// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm-only -disable-llvm-passes -verify -verify-ignore-unexpected
// RUN: %clang_cc1 -finclude-default-header -triple dxil-pc-shadermodel6.6-library %s -fnative-half-type -emit-llvm-only -disable-llvm-passes -verify -verify-ignore-unexpected=note

float2 test_no_second_arg(float2 p0) {
return __builtin_hlsl_elementwise_clamp(p0);
@@ -20,54 +20,85 @@ float2 test_clamp_no_second_arg(float2 p0) {
// expected-error@-1 {{no matching function for call to 'clamp'}}
}

float test_scalar_first_arg(float p0, float2 p1) {
return clamp(p0, p1, p1);
// expected-error@-1 {{call to 'clamp' is ambiguous}}
}

float test_scalar_first_arg2(float p0, float2 p1) {
return clamp(p0, p0, p1);
// expected-error@-1 {{call to 'clamp' is ambiguous}}
}

float2 test_scalar_first_arg3(float p0, float2 p1) {
return clamp(p0, p0, p1);
// expected-error@-1 {{call to 'clamp' is ambiguous}}
}

float3 test_thing(float3 p0, float2 p1) {
return clamp(p0, p0, p1);
// expected-error@-1 {{cannot initialize return object of type 'float3' (aka 'vector<float, 3>') with an rvalue of type 'vector<float, 2>' (vector of 2 'float' values)}}
}

typedef float float5 __attribute__((ext_vector_type(5)));

// Check that vectors of the wrong size are rejected.
float5 vec_too_big(float5 p0) {
return clamp(p0, p0, p0);
// expected-error@-1 {{call to 'clamp' is ambiguous}}
}

float2 test_clamp_vector_size_mismatch(float3 p0, float2 p1) {
return clamp(p0, p0, p1);
// expected-warning@-1 {{implicit conversion truncates vector: 'float3' (aka 'vector<float, 3>') to 'vector<float, 2>' (vector of 2 'float' values)}}
// expected-warning@-2 {{implicit conversion truncates vector: 'float3' (aka 'vector<float, 3>') to 'vector<float, 2>' (vector of 2 'float' values)}}
}

float2 test_clamp_builtin_vector_size_mismatch(float3 p0, float2 p1) {
return __builtin_hlsl_elementwise_clamp(p0, p1, p1);
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

// Allowed by the overloads in hlsl_compat_overloads.h.
// Support for this overload might be removed in a future version of HLSL.
float test_clamp_scalar_mismatch(float p0, half p1) {
return clamp(p1, p0, p1);
// expected-error@-1 {{call to 'clamp' is ambiguous}}
}

// Allowed by the overloads in hlsl_compat_overloads.h.
// Support for this overload might be removed in a future version of HLSL.
float2 test_clamp_element_type_mismatch(half2 p0, float2 p1) {
return clamp(p1, p0, p1);
// expected-error@-1 {{call to 'clamp' is ambiguous}}
}

float2 test_builtin_clamp_float2_splat(float p0, float2 p1) {
return __builtin_hlsl_elementwise_clamp(p0, p1, p1);
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must be vectors}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

float3 test_builtin_clamp_float3_splat(float p0, float3 p1) {
return __builtin_hlsl_elementwise_clamp(p0, p1, p1);
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must be vectors}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

float4 test_builtin_clamp_float4_splat(float p0, float4 p1) {
return __builtin_hlsl_elementwise_clamp(p0, p1, p1);
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must be vectors}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

float2 test_clamp_float2_int_splat(float2 p0, int p1) {
return __builtin_hlsl_elementwise_clamp(p0, p1, p1);
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must be vectors}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

float3 test_clamp_float3_int_splat(float3 p0, int p1) {
return __builtin_hlsl_elementwise_clamp(p0, p1, p1);
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must be vectors}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

float2 test_builtin_clamp_int_vect_to_float_vec_promotion(int2 p0, float p1) {
return __builtin_hlsl_elementwise_clamp(p0, p1, p1);
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must be vectors}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

float test_builtin_clamp_bool_type_promotion(bool p0) {
@@ -77,15 +108,15 @@ float test_builtin_clamp_bool_type_promotion(bool p0) {

float builtin_bool_to_float_type_promotion(float p0, bool p1) {
return __builtin_hlsl_elementwise_clamp(p0, p0, p1);
// expected-error@-1 {{3rd argument must be a floating point type (was 'bool')}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

float builtin_bool_to_float_type_promotion2(bool p0, float p1) {
return __builtin_hlsl_elementwise_clamp(p1, p0, p1);
// expected-error@-1 {{2nd argument must be a floating point type (was 'bool')}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}

float builtin_clamp_int_to_float_promotion(float p0, int p1) {
return __builtin_hlsl_elementwise_clamp(p0, p0, p1);
// expected-error@-1 {{3rd argument must be a floating point type (was 'int')}}
// expected-error@-1 {{all arguments to '__builtin_hlsl_elementwise_clamp' must have the same type}}
}