[mlir][llvmir] add llvm.experimental.constrained.uitofp intrinsics #133300

Merged
merged 1 commit into llvm:main on Mar 28, 2025

Conversation

FantasqueX
Contributor

@FantasqueX marked this pull request as ready for review March 27, 2025 19:01
@llvmbot
Member

llvmbot commented Mar 27, 2025

@llvm/pr-subscribers-mlir-llvm

Author: Letu Ren (FantasqueX)

Changes

https://llvm.org/docs/LangRef.html#llvm-experimental-constrained-uitofp-intrinsic
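
For illustration, a minimal usage sketch of the op this patch adds to the LLVM dialect (the value names and the chosen rounding/exception modes are illustrative, not taken from the patch):

// Convert an unsigned i32 to f32 under an explicit rounding mode and FP
// exception behavior; on export this becomes a call to
// @llvm.experimental.constrained.uitofp.f32.i32 with the matching
// !"round.tonearest" and !"fpexcept.strict" metadata arguments.
%r = llvm.intr.experimental.constrained.uitofp %x tonearest strict : i32 to f32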


Full diff: https://github.com/llvm/llvm-project/pull/133300.diff

3 Files Affected:

  • (modified) mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td (+9)
  • (modified) mlir/test/Target/LLVMIR/Import/intrinsic.ll (+21-2)
  • (modified) mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir (+31)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index dfb35228cbd29..2aed2a0d0816d 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -463,6 +463,15 @@ class LLVM_ConstrainedIntr<string mnem, int numArgs,
   }];
 }
 
+def LLVM_ConstrainedUIToFP
+    : LLVM_ConstrainedIntr<"uitofp", /*numArgs=*/1,
+        /*overloadedResult=*/1, /*overloadedOperands=*/[0],
+        /*hasRoundingMode=*/1> {
+    let assemblyFormat = [{
+        $arg_0 $roundingmode $fpExceptionBehavior attr-dict `:` type($arg_0) `to` type(results)
+    }];
+}
+
 def LLVM_ConstrainedSIToFP
     : LLVM_ConstrainedIntr<"sitofp", /*numArgs=*/1,
         /*overloadedResult=*/1, /*overloadedOperands=*/[0],
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index 02fea85b028b3..7c2977f1d5f4c 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -1043,6 +1043,23 @@ define void @experimental_constrained_fptrunc(double %s, <4 x double> %v) {
   ret void
 }
 
+; CHECK-LABEL: experimental_constrained_uitofp
+define void @experimental_constrained_uitofp(i32 %s, <4 x i32> %v) {
+  ; CHECK: llvm.intr.experimental.constrained.uitofp %{{.*}} towardzero ignore : i32 to f32
+  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %s, metadata !"round.towardzero", metadata !"fpexcept.ignore")
+  ; CHECK: llvm.intr.experimental.constrained.uitofp %{{.*}} tonearest maytrap : i32 to f32
+  %2 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %s, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  ; CHECK: llvm.intr.experimental.constrained.uitofp %{{.*}} upward strict : i32 to f32
+  %3 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %s, metadata !"round.upward", metadata !"fpexcept.strict")
+  ; CHECK: llvm.intr.experimental.constrained.uitofp %{{.*}} downward ignore : i32 to f32
+  %4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %s, metadata !"round.downward", metadata !"fpexcept.ignore")
+  ; CHECK: llvm.intr.experimental.constrained.uitofp %{{.*}} tonearestaway ignore : i32 to f32
+  %5 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %s, metadata !"round.tonearestaway", metadata !"fpexcept.ignore")
+  ; CHECK: llvm.intr.experimental.constrained.uitofp %{{.*}} tonearestaway ignore : vector<4xi32> to vector<4xf32>
+  %6 = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %v, metadata !"round.tonearestaway", metadata !"fpexcept.ignore")
+  ret void
+}
+
 ; CHECK-LABEL: experimental_constrained_sitofp
 define void @experimental_constrained_sitofp(i32 %s, <4 x i32> %v) {
   ; CHECK: llvm.intr.experimental.constrained.sitofp %{{.*}} towardzero ignore : i32 to f32
@@ -1317,8 +1334,10 @@ declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
 declare <8 x ptr> @llvm.ptrmask.v8p0.v8i64(<8 x ptr>, <8 x i64>)
 declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
 declare <4 x float> @llvm.vector.extract.v4f32.nxv4f32(<vscale x 4 x float>, i64)
-declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4i32(<4 x i32>, metadata, metadata)
-declare float @llvm.experimental.constrained.fptrunc.f32.i32(i32, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
 declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
 declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
 declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index d33f99d44eb88..74456996a0e0d 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -1111,6 +1111,35 @@ llvm.func @vector_ptrmask(%p: !llvm.vec<8 x ptr>, %mask: vector<8 x i64>) -> !ll
   llvm.return %0 : !llvm.vec<8 x ptr>
 }
 
+// CHECK-LABEL: @experimental_constrained_uitofp
+llvm.func @experimental_constrained_uitofp(%s: i32, %v: vector<4 x i32>) {
+  // CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(
+  // CHECK: metadata !"round.towardzero"
+  // CHECK: metadata !"fpexcept.ignore"
+  %0 = llvm.intr.experimental.constrained.uitofp %s towardzero ignore : i32 to f32
+  // CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(
+  // CHECK: metadata !"round.tonearest"
+  // CHECK: metadata !"fpexcept.maytrap"
+  %1 = llvm.intr.experimental.constrained.uitofp %s tonearest maytrap : i32 to f32
+  // CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(
+  // CHECK: metadata !"round.upward"
+  // CHECK: metadata !"fpexcept.strict"
+  %2 = llvm.intr.experimental.constrained.uitofp %s upward strict : i32 to f32
+  // CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(
+  // CHECK: metadata !"round.downward"
+  // CHECK: metadata !"fpexcept.ignore"
+  %3 = llvm.intr.experimental.constrained.uitofp %s downward ignore : i32 to f32
+  // CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(
+  // CHECK: metadata !"round.tonearestaway"
+  // CHECK: metadata !"fpexcept.ignore"
+  %4 = llvm.intr.experimental.constrained.uitofp %s tonearestaway ignore : i32 to f32
+  // CHECK: call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(
+  // CHECK: metadata !"round.upward"
+  // CHECK: metadata !"fpexcept.strict"
+  %5 = llvm.intr.experimental.constrained.uitofp %v upward strict : vector<4 x i32> to vector<4 x f32>
+  llvm.return
+}
+
 // CHECK-LABEL: @experimental_constrained_sitofp
 llvm.func @experimental_constrained_sitofp(%s: i32, %v: vector<4 x i32>) {
   // CHECK: call float @llvm.experimental.constrained.sitofp.f32.i32(
@@ -1356,6 +1385,8 @@ llvm.func @experimental_constrained_fpext(%s: f32, %v: vector<4xf32>) {
 // CHECK-DAG: declare ptr addrspace(1) @llvm.stacksave.p1()
 // CHECK-DAG: declare void @llvm.stackrestore.p0(ptr)
 // CHECK-DAG: declare void @llvm.stackrestore.p1(ptr addrspace(1))
+// CHECK-DAG: declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+// CHECK-DAG: declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
 // CHECK-DAG: declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
 // CHECK-DAG: declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
 // CHECK-DAG: declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)

@llvmbot
Member

llvmbot commented Mar 27, 2025

@llvm/pr-subscribers-mlir

Contributor

@gysit left a comment

Thanks!

LGTM

For follow-up operations it is fine to shorten the tests if things are already tested in other places!

// CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(
// CHECK: metadata !"round.towardzero"
// CHECK: metadata !"fpexcept.ignore"
%0 = llvm.intr.experimental.constrained.uitofp %s towardzero ignore : i32 to f32
Contributor

Note that checking all combinations here is not really necessary since the logic is already tested by sitofp and probably other operations.
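
For instance, a trimmed follow-up test could exercise just one scalar and one vector case, something along these lines (a sketch only; the function name, value names, and the chosen modes are illustrative):

// CHECK-LABEL: @trimmed_constrained_example
llvm.func @trimmed_constrained_example(%s: i32, %v: vector<4 x i32>) {
  // CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(
  %0 = llvm.intr.experimental.constrained.uitofp %s towardzero ignore : i32 to f32
  // CHECK: call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(
  %1 = llvm.intr.experimental.constrained.uitofp %v upward strict : vector<4 x i32> to vector<4 x f32>
  llvm.return
}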

Contributor Author

OK, I'll take that into account when implementing the next intrinsics.

@gysit merged commit 29cb003 into llvm:main Mar 28, 2025
16 checks passed