[X86][FP16] Adding lowerings for FP16 ISD::LRINT and ISD::LLRINT #127382

Merged: 1 commit merged into llvm:main from phoebewang:lrint2 on Feb 22, 2025

Conversation

phoebewang (Contributor)

Address comment in #126477

@llvmbot (Member) commented Feb 16, 2025

@llvm/pr-subscribers-backend-x86

Author: Phoebe Wang (phoebewang)

Changes

Address comment in #126477
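
As a rough illustration (not part of the patch; the function name is made up), the scalar case now lowers directly on AVX512-FP16 targets. Based on the test checks below, IR like the following is expected to select a single vcvttsh2si rather than promoting through f32 first:

; Illustrative only: @llrint_f16 is a hypothetical function name.
define i64 @llrint_f16(half %x) {
  %r = call i64 @llvm.llrint.i64.f16(half %x)
  ret i64 %r
}
declare i64 @llvm.llrint.i64.f16(half)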


Patch is 142.63 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/127382.diff

4 Files Affected:

  • (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+44-7)
  • (modified) llvm/lib/Target/X86/X86InstrAVX512.td (+33)
  • (added) llvm/test/CodeGen/X86/vector-llrint-f16.ll (+599)
  • (added) llvm/test/CodeGen/X86/vector-lrint-f16.ll (+2163)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9592137b34842..eb0c63249674e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -735,6 +735,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::FCANONICALIZE, MVT::f16, Custom);
     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
+    setOperationAction(ISD::LRINT, MVT::f16, Expand);
+    setOperationAction(ISD::LLRINT, MVT::f16, Expand);
 
     setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
     setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
@@ -2312,6 +2314,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::FMINIMUMNUM,          MVT::f16, Custom);
     setOperationAction(ISD::FP_EXTEND,            MVT::f32, Legal);
     setOperationAction(ISD::STRICT_FP_EXTEND,     MVT::f32, Legal);
+    setOperationAction(ISD::LRINT,                MVT::f16, Legal);
+    setOperationAction(ISD::LLRINT,               MVT::f16, Legal);
 
     setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
     setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);
@@ -2359,6 +2363,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::FMAXIMUM, MVT::v32f16, Custom);
       setOperationAction(ISD::FMINIMUMNUM, MVT::v32f16, Custom);
       setOperationAction(ISD::FMAXIMUMNUM, MVT::v32f16, Custom);
+      setOperationAction(ISD::LRINT, MVT::v32f16, Legal);
+      setOperationAction(ISD::LLRINT, MVT::v8f16, Legal);
     }
 
     if (Subtarget.hasVLX()) {
@@ -2413,6 +2419,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationAction(ISD::FMAXIMUM, MVT::v16f16, Custom);
       setOperationAction(ISD::FMINIMUMNUM, MVT::v16f16, Custom);
       setOperationAction(ISD::FMAXIMUMNUM, MVT::v16f16, Custom);
+      setOperationAction(ISD::LRINT, MVT::v8f16, Legal);
+      setOperationAction(ISD::LRINT, MVT::v16f16, Legal);
     }
   }
 
@@ -34055,8 +34063,15 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
   case ISD::LRINT:
     if (N->getValueType(0) == MVT::v2i32) {
       SDValue Src = N->getOperand(0);
-      if (Src.getValueType() == MVT::v2f64)
-        Results.push_back(DAG.getNode(X86ISD::CVTP2SI, dl, MVT::v4i32, Src));
+      if (Subtarget.hasFP16() && Src.getValueType() == MVT::v2f16) {
+        Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f16, Src,
+                          DAG.getUNDEF(MVT::v2f16));
+        Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Src,
+                          DAG.getUNDEF(MVT::v4f16));
+      } else if (Src.getValueType() != MVT::v2f64) {
+        return;
+      }
+      Results.push_back(DAG.getNode(X86ISD::CVTP2SI, dl, MVT::v4i32, Src));
       return;
     }
     [[fallthrough]];
@@ -53640,13 +53655,35 @@ static SDValue combineLRINT_LLRINT(SDNode *N, SelectionDAG &DAG,
   EVT SrcVT = Src.getValueType();
   SDLoc DL(N);
 
-  if (!Subtarget.hasDQI() || !Subtarget.hasVLX() || VT != MVT::v2i64 ||
-      SrcVT != MVT::v2f32)
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+
+  // Let legalize expand this if it isn't a legal type yet.
+  if (!TLI.isTypeLegal(VT))
+    return SDValue();
+
+  if ((SrcVT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) ||
+      (SrcVT.getScalarType() == MVT::f32 && !Subtarget.hasDQI()))
     return SDValue();
 
-  return DAG.getNode(X86ISD::CVTP2SI, DL, VT,
-                     DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, Src,
-                                 DAG.getUNDEF(SrcVT)));
+  if (SrcVT == MVT::v2f16) {
+    SrcVT = MVT::v4f16;
+    Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, Src,
+                      DAG.getUNDEF(MVT::v2f16));
+  }
+
+  if (SrcVT == MVT::v4f16) {
+    SrcVT = MVT::v8f16;
+    Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, Src,
+                      DAG.getUNDEF(MVT::v4f16));
+  } else if (SrcVT == MVT::v2f32) {
+    SrcVT = MVT::v4f32;
+    Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, Src,
+                      DAG.getUNDEF(MVT::v2f32));
+  } else {
+    return SDValue();
+  }
+
+  return DAG.getNode(X86ISD::CVTP2SI, DL, VT, Src);
 }
 
 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 9d8c123185a7c..704007fee66db 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -13143,6 +13143,26 @@ defm VCVTTPH2UQQ : avx512_cvttph2qq<0x78, "vcvttph2uqq", X86any_cvttp2ui,
                                  SchedWriteCvtPS2DQ>, T_MAP5, PD,
                                  EVEX_CD8<16, CD8VQ>;
 
+let Predicates = [HasFP16, HasVLX] in {
+  def : Pat<(v8i16 (lrint (v8f16 VR128X:$src))), (VCVTPH2WZ128rr VR128X:$src)>;
+  def : Pat<(v8i16 (lrint (loadv8f16 addr:$src))), (VCVTPH2WZ128rm addr:$src)>;
+  def : Pat<(v16i16 (lrint (v16f16 VR256X:$src))), (VCVTPH2WZ256rr VR256X:$src)>;
+  def : Pat<(v16i16 (lrint (loadv16f16 addr:$src))), (VCVTPH2WZ256rm addr:$src)>;
+  def : Pat<(v8i32 (lrint (v8f16 VR128X:$src))), (VCVTPH2DQZ256rr VR128X:$src)>;
+  def : Pat<(v8i32 (lrint (loadv8f16 addr:$src))), (VCVTPH2DQZ256rm addr:$src)>;
+}
+
+let Predicates = [HasFP16] in {
+  def : Pat<(v32i16 (lrint (v32f16 VR512:$src))), (VCVTPH2WZrr VR512:$src)>;
+  def : Pat<(v32i16 (lrint (loadv32f16 addr:$src))), (VCVTPH2WZrm addr:$src)>;
+  def : Pat<(v16i32 (lrint (v16f16 VR256X:$src))), (VCVTPH2DQZrr VR256X:$src)>;
+  def : Pat<(v16i32 (lrint (loadv16f16 addr:$src))), (VCVTPH2DQZrm addr:$src)>;
+  def : Pat<(v8i64 (lrint (v8f16 VR128X:$src))), (VCVTPH2QQZrr VR128X:$src)>;
+  def : Pat<(v8i64 (lrint (loadv8f16 addr:$src))), (VCVTPH2QQZrm addr:$src)>;
+  def : Pat<(v8i64 (llrint (v8f16 VR128X:$src))), (VCVTPH2QQZrr VR128X:$src)>;
+  def : Pat<(v8i64 (llrint (loadv8f16 addr:$src))), (VCVTPH2QQZrm addr:$src)>;
+}
+
 // Convert Signed/Unsigned Quardword to Half
 multiclass avx512_cvtqq2ph<bits<8> opc, string OpcodeStr, SDPatternOperator OpNode,
                            SDPatternOperator MaskOpNode, SDNode OpNodeRnd,
@@ -13269,6 +13289,19 @@ defm VCVTTSH2USI64Z: avx512_cvt_s_all<0x78, "vcvttsh2usi", f16x_info, i64x_info,
                         any_fp_to_uint, X86cvtts2UInt, X86cvtts2UIntSAE, WriteCvtSS2I,
                         "{q}", HasFP16>, T_MAP5, XS, REX_W, EVEX_CD8<16, CD8VT1>;
 
+let Predicates = [HasFP16] in {
+  def : Pat<(i16 (lrint FR16:$src)), (EXTRACT_SUBREG (VCVTTSH2SIZrr FR16:$src), sub_16bit)>;
+  def : Pat<(i32 (lrint FR16:$src)), (VCVTTSH2SIZrr FR16:$src)>;
+  def : Pat<(i32 (lrint (loadf16 addr:$src))), (VCVTTSH2SIZrm addr:$src)>;
+}
+
+let Predicates = [HasFP16, In64BitMode] in {
+  def : Pat<(i64 (lrint FR16:$src)), (VCVTTSH2SI64Zrr FR16:$src)>;
+  def : Pat<(i64 (lrint (loadf16 addr:$src))), (VCVTTSH2SI64Zrm addr:$src)>;
+  def : Pat<(i64 (llrint FR16:$src)), (VCVTTSH2SI64Zrr FR16:$src)>;
+  def : Pat<(i64 (llrint (loadf16 addr:$src))), (VCVTTSH2SI64Zrm addr:$src)>;
+}
+
 let Predicates = [HasFP16] in {
   defm VCVTSI2SHZ  : avx512_vcvtsi_common<0x2A,  X86SintToFp, X86SintToFpRnd, WriteCvtI2SS, GR32,
                                    v8f16x_info, i32mem, loadi32, "cvtsi2sh", "l">,
diff --git a/llvm/test/CodeGen/X86/vector-llrint-f16.ll b/llvm/test/CodeGen/X86/vector-llrint-f16.ll
new file mode 100644
index 0000000000000..dc758952c3dcb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-llrint-f16.ll
@@ -0,0 +1,599 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/XRINT/lrint/g' %s | llc -mtriple=x86_64-unknown -mattr=avx2,f16c | FileCheck %s --check-prefix=AVX
+; RUN: sed 's/XRINT/llrint/g' %s | llc -mtriple=x86_64-unknown -mattr=avx2,f16c | FileCheck %s --check-prefix=AVX
+; RUN: sed 's/XRINT/lrint/g' %s | llc -mtriple=x86_64-unknown -mattr=avx512fp16,avx512vl | FileCheck %s --check-prefix=FP16
+; RUN: sed 's/XRINT/llrint/g' %s | llc -mtriple=x86_64-unknown -mattr=avx512fp16,avx512vl | FileCheck %s --check-prefix=FP16
+
+define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
+; AVX-LABEL: llrint_v1i64_v1f16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vcvttss2si %xmm0, %rax
+; AVX-NEXT:    retq
+;
+; FP16-LABEL: llrint_v1i64_v1f16:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vcvttsh2si %xmm0, %rax
+; FP16-NEXT:    retq
+  %a = call <1 x i64> @llvm.XRINT.v1i64.v1f16(<1 x half> %x)
+  ret <1 x i64> %a
+}
+
+define <2 x i64> @llrint_v2i64_v2f16(<2 x half> %x) {
+; AVX-LABEL: llrint_v2i64_v2f16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm1
+; AVX-NEXT:    vroundss $4, %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vcvttss2si %xmm1, %rax
+; AVX-NEXT:    vmovq %rax, %xmm1
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vcvttss2si %xmm0, %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT:    retq
+;
+; FP16-LABEL: llrint_v2i64_v2f16:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vcvtph2qq %xmm0, %xmm0
+; FP16-NEXT:    retq
+  %a = call <2 x i64> @llvm.XRINT.v2i64.v2f16(<2 x half> %x)
+  ret <2 x i64> %a
+}
+
+define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
+; AVX-LABEL: llrint_v4i64_v4f16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vroundss $4, %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vcvttss2si %xmm1, %rax
+; AVX-NEXT:    vmovq %rax, %xmm1
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX-NEXT:    vroundss $4, %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX-NEXT:    vcvttss2si %xmm2, %rax
+; AVX-NEXT:    vmovq %rax, %xmm2
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm2
+; AVX-NEXT:    vroundss $4, %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX-NEXT:    vcvttss2si %xmm2, %rax
+; AVX-NEXT:    vmovq %rax, %xmm2
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vcvttss2si %xmm0, %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+;
+; FP16-LABEL: llrint_v4i64_v4f16:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vcvtph2qq %xmm0, %ymm0
+; FP16-NEXT:    retq
+  %a = call <4 x i64> @llvm.XRINT.v4i64.v4f16(<4 x half> %x)
+  ret <4 x i64> %a
+}
+
+define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
+; AVX-LABEL: llrint_v8i64_v8f16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vroundss $4, %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vcvttss2si %xmm1, %rax
+; AVX-NEXT:    vmovq %rax, %xmm1
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX-NEXT:    vroundss $4, %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX-NEXT:    vcvttss2si %xmm2, %rax
+; AVX-NEXT:    vmovq %rax, %xmm2
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm2
+; AVX-NEXT:    vroundss $4, %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX-NEXT:    vcvttss2si %xmm2, %rax
+; AVX-NEXT:    vmovq %rax, %xmm2
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[1,1,1,1,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vroundss $4, %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vcvttss2si %xmm3, %rax
+; AVX-NEXT:    vmovq %rax, %xmm3
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; AVX-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm2
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vroundss $4, %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vcvttss2si %xmm1, %rax
+; AVX-NEXT:    vmovq %rax, %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vroundss $4, %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vcvttss2si %xmm3, %rax
+; AVX-NEXT:    vmovq %rax, %xmm3
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vroundss $4, %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vcvttss2si %xmm3, %rax
+; AVX-NEXT:    vmovq %rax, %xmm3
+; AVX-NEXT:    vshufpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vcvttss2si %xmm0, %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; AVX-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX-NEXT:    vmovdqa %ymm2, %ymm0
+; AVX-NEXT:    retq
+;
+; FP16-LABEL: llrint_v8i64_v8f16:
+; FP16:       # %bb.0:
+; FP16-NEXT:    vcvtph2qq %xmm0, %zmm0
+; FP16-NEXT:    retq
+  %a = call <8 x i64> @llvm.XRINT.v8i64.v8f16(<8 x half> %x)
+  ret <8 x i64> %a
+}
+
+define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
+; AVX-LABEL: llrint_v16i64_v16f16:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa %ymm0, %ymm2
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm2[3,3,3,3,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vroundss $4, %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; AVX-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX-NEXT:    vcvttss2si %xmm0, %rax
+; AVX-NEXT:    vmovq %rax, %xmm0
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vroundss $4, %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vcvttss2si %xmm1, %rax
+; AVX-NEXT:    vmovq %rax, %xmm1
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm1
+; AVX-NEXT:    vroundss $4, %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vcvttss2si %xmm1, %rax
+; AVX-NEXT:    vmovq %rax, %xmm1
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[1,1,1,1,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vroundss $4, %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vcvttss2si %xmm3, %rax
+; AVX-NEXT:    vmovq %rax, %xmm3
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; AVX-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vroundss $4, %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vcvtps2ph $4, %xmm1, %xmm1
+; AVX-NEXT:    vcvtph2ps %xmm1, %xmm1
+; AVX-NEXT:    vcvttss2si %xmm1, %rax
+; AVX-NEXT:    vmovq %rax, %xmm1
+; AVX-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[3,3,3,3]
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vroundss $4, %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vcvttss2si %xmm3, %rax
+; AVX-NEXT:    vmovq %rax, %xmm3
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vroundss $4, %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vcvttss2si %xmm3, %rax
+; AVX-NEXT:    vmovq %rax, %xmm3
+; AVX-NEXT:    vshufpd {{.*#+}} xmm4 = xmm2[1,0]
+; AVX-NEXT:    vcvtph2ps %xmm4, %xmm4
+; AVX-NEXT:    vroundss $4, %xmm4, %xmm4, %xmm4
+; AVX-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; AVX-NEXT:    vcvtph2ps %xmm4, %xmm4
+; AVX-NEXT:    vcvttss2si %xmm4, %rax
+; AVX-NEXT:    vmovq %rax, %xmm4
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm4[0],xmm3[0]
+; AVX-NEXT:    vinserti128 $1, %xmm1, %ymm3, %ymm1
+; AVX-NEXT:    vextracti128 $1, %ymm2, %xmm3
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm3[3,3,3,3,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX-NEXT:    vroundss $4, %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vcvtps2ph $4, %xmm2, %xmm2
+; AVX-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX-NEXT:    vcvttss2si %xmm2, %rax
+; AVX-NEXT:    vmovq %rax, %xmm2
+; AVX-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX-NEXT:    vcvtph2ps %xmm4, %xmm4
+; AVX-NEXT:    vroundss $4, %xmm4, %xmm4, %xmm4
+; AVX-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; AVX-NEXT:    vcvtph2ps %xmm4, %xmm4
+; AVX-NEXT:    vcvttss2si %xmm4, %rax
+; AVX-NEXT:    vmovq %rax, %xmm4
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm4[0],xmm2[0]
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm4
+; AVX-NEXT:    vroundss $4, %xmm4, %xmm4, %xmm4
+; AVX-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; AVX-NEXT:    vcvtph2ps %xmm4, %xmm4
+; AVX-NEXT:    vcvttss2si %xmm4, %rax
+; AVX-NEXT:    vmovq %rax, %xmm4
+; AVX-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm3[1,1,1,1,4,5,6,7]
+; AVX-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX-NEXT:    vroundss $4, %xmm5, %xmm5, %xmm5
+; AVX-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; AVX-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX-NEXT:    vcvttss2si %xmm5, %rax
+; AVX-NEXT:    vmovq %rax, %xmm5
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; AVX-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm3[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    vcvtph2ps %xmm4, %xmm4
+; AVX-NEXT:    vroundss $4, %xmm4, %xmm4, %xmm4
+; AVX-NEXT:    vcvtps2ph $4, %xmm4, %xmm4
+; AVX-NEXT:    vcvtph2ps %xmm4, %xmm4
+; AVX-NEXT:    vcvttss2si %xmm4, %rax
+; AVX-NEXT:    vmovq %rax, %xmm4
+; AVX-NEXT:    vshufps {{.*#+}} xmm5 = xmm3[3,3,3,3]
+; AVX-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX-NEXT:    vroundss $4, %xmm5, %xmm5, %xmm5
+; AVX-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; AVX-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX-NEXT:    vcvttss2si %xmm5, %rax
+; AVX-NEXT:    vmovq %rax, %xmm5
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm5[0],xmm4[0]
+; AVX-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX-NEXT:    vroundss $4, %xmm5, %xmm5, %xmm5
+; AVX-NEXT:    vcvtps2ph $4, %xmm5, %xmm5
+; AVX-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX-NEXT:    vcvttss2si %xmm5, %rax
+; AVX-NEXT:    vmovq %rax, %xmm5
+; AVX-NEXT:    vshufpd {{.*#+}} xmm3 = xmm3[1,0]
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vroundss $4, %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vcvtps2ph $4, %xmm3, %xmm3
+; AVX-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX-NEXT:    vcvttss2si %xmm3, %rax
+; AVX-NEXT:    vmovq %rax, %xmm3
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
+; AVX-NEXT:    vinserti128 $1, %xmm4, %ymm3, %ymm3
+; AVX-NEXT:    retq
+;
+; FP16-LABEL: llrint_v16i64_v16f16:
+; FP16:       # %bb.0:
+; FP16-NEXT:    ...
[truncated]
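
For the vector case, a hedged sketch of the intent (the function name is illustrative, not taken from the tests): on AVX512-FP16 targets an <8 x half> llrint is expected to select a single vcvtph2qq into a zmm register, as in the llrint_v8i64_v8f16 FP16 check above, while the AVX2/F16C path scalarizes each element through vcvtph2ps and vcvttss2si.

; Illustrative only; run with: llc -mtriple=x86_64-unknown -mattr=avx512fp16,avx512vl
define <8 x i64> @llrint_v8i64(<8 x half> %x) {
  %r = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
  ret <8 x i64> %r
}
declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>)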


⚠️ C/C++ code formatter, clang-format found issues in your code. ⚠️

You can test this locally with the following command:
git-clang-format --diff 3e3af861b4a48b90041fad7ab8e9bc17e8a0a602 26ff6810f1a52a8f13302d7409ea363d9d6fc50b --extensions cpp -- llvm/lib/Target/X86/X86ISelLowering.cpp
View the diff from clang-format below:
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index eb0c632496..f2a0fa7de1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2314,8 +2314,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::FMINIMUMNUM,          MVT::f16, Custom);
     setOperationAction(ISD::FP_EXTEND,            MVT::f32, Legal);
     setOperationAction(ISD::STRICT_FP_EXTEND,     MVT::f32, Legal);
-    setOperationAction(ISD::LRINT,                MVT::f16, Legal);
-    setOperationAction(ISD::LLRINT,               MVT::f16, Legal);
+    setOperationAction(ISD::LRINT, MVT::f16, Legal);
+    setOperationAction(ISD::LLRINT, MVT::f16, Legal);
 
     setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
     setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);

@RKSimon (Collaborator) left a comment


LGTM

@phoebewang merged commit fa64a21 into llvm:main on Feb 22, 2025
9 of 10 checks passed
@phoebewang deleted the lrint2 branch on February 22, 2025 at 13:17