[mlir][llvm] Add llvm.experimental.constrained.fpext operation #129054
Conversation
@llvm/pr-subscribers-mlir
Author: Letu Ren (FantasqueX)
Changes: Ref: #86260
Full diff: https://github.com/llvm/llvm-project/pull/129054.diff
3 Files Affected:
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index c270b0898f865..52d76946f2363 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -456,6 +456,15 @@ def LLVM_ConstrainedFPTruncIntr
}];
}
+def LLVM_ConstrainedFPExtIntr
+ : LLVM_ConstrainedIntr<"fpext", /*numArgs=*/1,
+ /*overloadedResult=*/1, /*overloadedOperands=*/[0],
+ /*hasRoundingMode=*/0> {
+ let assemblyFormat = [{
+ $arg_0 $fpExceptionBehavior attr-dict `:` type($arg_0) `to` type(results)
+ }];
+}
+
// Intrinsics with multiple returns.
class LLVM_ArithWithOverflowOp<string mnem>
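For reference, the assemblyFormat above yields custom syntax of the following shape. This is a minimal illustrative sketch (the function and value names are made up; the op syntax mirrors what the tests below exercise):

llvm.func @fpext_example(%x: f32) -> f64 {
  // Extend f32 to f64; `strict` is the fp exception behavior attribute.
  %r = llvm.intr.experimental.constrained.fpext %x strict : f32 to f64
  llvm.return %r : f64
}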
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index 569b0def37856..cf88f009050ed 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -1003,6 +1003,19 @@ define void @experimental_constrained_fptrunc(double %s, <4 x double> %v) {
ret void
}
+; CHECK-LABEL: experimental_constrained_fpext
+define void @experimental_constrained_fpext(float %s, <4 x float> %v) {
+ ; CHECK: llvm.intr.experimental.constrained.fpext %{{.*}} ignore : f32 to f64
+ %1 = call double @llvm.experimental.constrained.fpext.f64.f32(float %s, metadata !"fpexcept.ignore")
+ ; CHECK: llvm.intr.experimental.constrained.fpext %{{.*}} maytrap : f32 to f64
+ %2 = call double @llvm.experimental.constrained.fpext.f64.f32(float %s, metadata !"fpexcept.maytrap")
+ ; CHECK: llvm.intr.experimental.constrained.fpext %{{.*}} strict : f32 to f64
+ %3 = call double @llvm.experimental.constrained.fpext.f64.f32(float %s, metadata !"fpexcept.strict")
+ ; CHECK: llvm.intr.experimental.constrained.fpext %{{.*}} ignore : vector<4xf32> to vector<4xf64>
+ %6 = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float> %v, metadata !"fpexcept.ignore")
+ ret void
+}
+
declare float @llvm.fmuladd.f32(float, float, float)
declare <8 x float> @llvm.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>)
declare float @llvm.fma.f32(float, float, float)
@@ -1243,3 +1256,5 @@ declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x flo
declare <4 x float> @llvm.vector.extract.v4f32.nxv4f32(<vscale x 4 x float>, i64)
declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
+declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
+declare double @llvm.experimental.constrained.fpext.f64.f32(float, metadata)
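The import direction covered by this test can be exercised locally with mlir-translate's LLVM IR importer; a sketch assuming the usual RUN-line form for this kind of test (the exact RUN line in the file may differ):

mlir-translate -import-llvm mlir/test/Target/LLVMIR/Import/intrinsic.ll | FileCheck mlir/test/Target/LLVMIR/Import/intrinsic.ll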
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index 3616a2e3c7b21..beb1dd0dad7c0 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -1099,6 +1099,23 @@ llvm.func @experimental_constrained_fptrunc(%s: f64, %v: vector<4xf32>) {
llvm.return
}
+// CHECK-LABEL: @experimental_constrained_fpext
+llvm.func @experimental_constrained_fpext(%s: f32, %v: vector<4xf32>) {
+ // CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(
+ // CHECK: metadata !"fpexcept.ignore"
+ %0 = llvm.intr.experimental.constrained.fpext %s ignore : f32 to f64
+ // CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(
+ // CHECK: metadata !"fpexcept.maytrap"
+ %1 = llvm.intr.experimental.constrained.fpext %s maytrap : f32 to f64
+ // CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(
+ // CHECK: metadata !"fpexcept.strict"
+ %2 = llvm.intr.experimental.constrained.fpext %s strict : f32 to f64
+ // CHECK: call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
+ // CHECK: metadata !"fpexcept.strict"
+ %5 = llvm.intr.experimental.constrained.fpext %v strict : vector<4xf32> to vector<4xf64>
+ llvm.return
+}
+
// Check that intrinsics are declared with appropriate types.
// CHECK-DAG: declare float @llvm.fma.f32(float, float, float)
// CHECK-DAG: declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>) #0
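The export direction can be exercised analogously with mlir-translate's LLVM IR exporter; again a sketch assuming the test's usual RUN-line form:

mlir-translate -mlir-to-llvmir mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir | FileCheck mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir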
@llvm/pr-subscribers-mlir-llvm
Author: Letu Ren (FantasqueX)
Changes: Ref: #86260
Full diff: https://github.com/llvm/llvm-project/pull/129054.diff
3 Files Affected: (same diff as quoted above)
LGTM!
Could this be merged?
Ref: #86260