
[X86] Add missing reg/imm attributes to VRNDSCALES instruction names #117203


Merged

RKSimon merged 1 commit into llvm:main from the x86-vrndscales-imm branch on Nov 22, 2024

Conversation

@RKSimon (Collaborator) commented Nov 21, 2024

More canonicalization of the instruction names to make them predictable - this more closely matches the equivalent VRNDSCALEP / VROUND instructions.

@llvmbot (Member) commented Nov 21, 2024

@llvm/pr-subscribers-backend-x86

@llvm/pr-subscribers-tablegen

Author: Simon Pilgrim (RKSimon)

Changes

More canonicalization of the instruction names to make them predictable - this more closely matches the equivalent VRNDSCALEP / VROUND instructions.


Patch is 25.76 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/117203.diff

8 Files Affected:

  • (modified) llvm/lib/Target/X86/X86CompressEVEX.cpp (+8-8)
  • (modified) llvm/lib/Target/X86/X86InstrAVX512.td (+7-7)
  • (modified) llvm/lib/Target/X86/X86InstrInfo.cpp (+21-21)
  • (modified) llvm/lib/Target/X86/X86SchedSapphireRapids.td (+9-9)
  • (modified) llvm/test/CodeGen/X86/evex-to-vex-compress.mir (+40-40)
  • (modified) llvm/test/TableGen/x86-fold-tables.inc (+12-12)
  • (modified) llvm/test/TableGen/x86-instr-mapping.inc (+8-8)
  • (modified) llvm/utils/TableGen/X86ManualInstrMapping.def (+4-4)
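
A note for reading the diff: the trailing letters in these opcode names describe the operand kinds in order - roughly, r for a register operand, m for a memory operand, i for an immediate, and b for the {sae} (EVEX.b) form in these scalar variants - while _Int marks the intrinsic-style variants. The patch only appends the previously missing i for the rounding-control immediate, e.g. VRNDSCALESDZr becomes VRNDSCALESDZri and VRNDSCALESDZm_Int becomes VRNDSCALESDZmi_Int; the SH (fp16) and rb_Int ({sae}) forms are renamed analogously. Below is a minimal C++ sketch of the SD/SS rename pairs, copied from the diff itself; the table name is illustrative only and not part of the patch.

```cpp
// Illustrative only (not in the patch): old -> new opcode names for the
// scalar VRNDSCALE SD/SS forms touched by this change, as shown in the diff.
#include <array>
#include <string_view>
#include <utility>

inline constexpr std::array<std::pair<std::string_view, std::string_view>, 8>
    VRndScaleSRenames = {{
        {"VRNDSCALESDZr", "VRNDSCALESDZri"},         // reg form gains 'i' (imm operand)
        {"VRNDSCALESDZm", "VRNDSCALESDZmi"},         // mem form gains 'i'
        {"VRNDSCALESSZr", "VRNDSCALESSZri"},
        {"VRNDSCALESSZm", "VRNDSCALESSZmi"},
        {"VRNDSCALESDZr_Int", "VRNDSCALESDZri_Int"}, // intrinsic variants follow suit
        {"VRNDSCALESDZm_Int", "VRNDSCALESDZmi_Int"},
        {"VRNDSCALESSZr_Int", "VRNDSCALESSZri_Int"},
        {"VRNDSCALESSZm_Int", "VRNDSCALESSZmi_Int"},
    }};
```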
diff --git a/llvm/lib/Target/X86/X86CompressEVEX.cpp b/llvm/lib/Target/X86/X86CompressEVEX.cpp
index 6fb480c37e1ff8..3ecff27be05d83 100644
--- a/llvm/lib/Target/X86/X86CompressEVEX.cpp
+++ b/llvm/lib/Target/X86/X86CompressEVEX.cpp
@@ -154,14 +154,14 @@ static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc) {
   case X86::VRNDSCALEPDZ256rmi:
   case X86::VRNDSCALEPSZ256rri:
   case X86::VRNDSCALEPSZ256rmi:
-  case X86::VRNDSCALESDZr:
-  case X86::VRNDSCALESDZm:
-  case X86::VRNDSCALESSZr:
-  case X86::VRNDSCALESSZm:
-  case X86::VRNDSCALESDZr_Int:
-  case X86::VRNDSCALESDZm_Int:
-  case X86::VRNDSCALESSZr_Int:
-  case X86::VRNDSCALESSZm_Int:
+  case X86::VRNDSCALESDZri:
+  case X86::VRNDSCALESDZmi:
+  case X86::VRNDSCALESSZri:
+  case X86::VRNDSCALESSZmi:
+  case X86::VRNDSCALESDZri_Int:
+  case X86::VRNDSCALESDZmi_Int:
+  case X86::VRNDSCALESSZri_Int:
+  case X86::VRNDSCALESSZmi_Int:
     const MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands() - 1);
     int64_t ImmVal = Imm.getImm();
     // Ensure that only bits 3:0 of the immediate are used.
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index a05a3063cac559..dcb0e1d72ab033 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -9596,7 +9596,7 @@ defm VSQRT : avx512_sqrt_scalar_all<0x51, "vsqrt", SchedWriteFSqrtSizes>, VEX_LI
 multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
                                   X86FoldableSchedWrite sched, X86VectorVTInfo _> {
   let ExeDomain = _.ExeDomain in {
-  defm r_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+  defm ri_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
                            (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
                            "$src3, $src2, $src1", "$src1, $src2, $src3",
                            (_.VT (X86RndScales (_.VT _.RC:$src1), (_.VT _.RC:$src2),
@@ -9604,14 +9604,14 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
                            Sched<[sched]>, SIMD_EXC;
 
   let Uses = [MXCSR] in
-  defm rb_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
+  defm rib_Int : AVX512_maskable_scalar<opc, MRMSrcReg, _, (outs _.RC:$dst),
                          (ins _.RC:$src1, _.RC:$src2, i32u8imm:$src3), OpcodeStr,
                          "$src3, {sae}, $src2, $src1", "$src1, $src2, {sae}, $src3",
                          (_.VT (X86RndScalesSAE (_.VT _.RC:$src1), (_.VT _.RC:$src2),
                          (i32 timm:$src3)))>, EVEX_B,
                          Sched<[sched]>;
 
-  defm m_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
+  defm mi_Int : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
                          (ins _.RC:$src1, _.IntScalarMemOp:$src2, i32u8imm:$src3),
                          OpcodeStr,
                          "$src3, $src2, $src1", "$src1, $src2, $src3",
@@ -9620,13 +9620,13 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
                          Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
 
   let isCodeGenOnly = 1, hasSideEffects = 0, Predicates = [HasAVX512] in {
-    def r : I<opc, MRMSrcReg, (outs _.FRC:$dst),
+    def ri : I<opc, MRMSrcReg, (outs _.FRC:$dst),
                (ins _.FRC:$src1, _.FRC:$src2, i32u8imm:$src3),
                OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
                []>, Sched<[sched]>, SIMD_EXC;
 
     let mayLoad = 1 in
-      def m : I<opc, MRMSrcMem, (outs _.FRC:$dst),
+      def mi : I<opc, MRMSrcMem, (outs _.FRC:$dst),
                  (ins _.FRC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
                  OpcodeStr#"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
                  []>, Sched<[sched.Folded, sched.ReadAfterFold]>, SIMD_EXC;
@@ -9635,13 +9635,13 @@ multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
 
   let Predicates = [HasAVX512] in {
     def : Pat<(X86any_VRndScale _.FRC:$src1, timm:$src2),
-              (_.EltVT (!cast<Instruction>(NAME#r) (_.EltVT (IMPLICIT_DEF)),
+              (_.EltVT (!cast<Instruction>(NAME#ri) (_.EltVT (IMPLICIT_DEF)),
                _.FRC:$src1, timm:$src2))>;
   }
 
   let Predicates = [HasAVX512, OptForSize] in {
     def : Pat<(X86any_VRndScale (_.ScalarLdFrag addr:$src1), timm:$src2),
-              (_.EltVT (!cast<Instruction>(NAME#m) (_.EltVT (IMPLICIT_DEF)),
+              (_.EltVT (!cast<Instruction>(NAME#mi) (_.EltVT (IMPLICIT_DEF)),
                addr:$src1, timm:$src2))>;
   }
 }
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index e8a50227912d8b..ab39e444243274 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6971,16 +6971,16 @@ static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
   case X86::VGETMANTSSZrri:
   case X86::VGETMANTSSZrrib:
   case X86::VGETMANTSSZrmi:
-  case X86::VRNDSCALESDZr:
-  case X86::VRNDSCALESDZr_Int:
-  case X86::VRNDSCALESDZrb_Int:
-  case X86::VRNDSCALESDZm:
-  case X86::VRNDSCALESDZm_Int:
-  case X86::VRNDSCALESSZr:
-  case X86::VRNDSCALESSZr_Int:
-  case X86::VRNDSCALESSZrb_Int:
-  case X86::VRNDSCALESSZm:
-  case X86::VRNDSCALESSZm_Int:
+  case X86::VRNDSCALESDZri:
+  case X86::VRNDSCALESDZri_Int:
+  case X86::VRNDSCALESDZrib_Int:
+  case X86::VRNDSCALESDZmi:
+  case X86::VRNDSCALESDZmi_Int:
+  case X86::VRNDSCALESSZri:
+  case X86::VRNDSCALESSZri_Int:
+  case X86::VRNDSCALESSZrib_Int:
+  case X86::VRNDSCALESSZmi:
+  case X86::VRNDSCALESSZmi_Int:
   case X86::VRCP14SDZrr:
   case X86::VRCP14SDZrm:
   case X86::VRCP14SSZrr:
@@ -6998,11 +6998,11 @@ static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
   case X86::VGETMANTSHZrri:
   case X86::VGETMANTSHZrrib:
   case X86::VGETMANTSHZrmi:
-  case X86::VRNDSCALESHZr:
-  case X86::VRNDSCALESHZr_Int:
-  case X86::VRNDSCALESHZrb_Int:
-  case X86::VRNDSCALESHZm:
-  case X86::VRNDSCALESHZm_Int:
+  case X86::VRNDSCALESHZri:
+  case X86::VRNDSCALESHZri_Int:
+  case X86::VRNDSCALESHZrib_Int:
+  case X86::VRNDSCALESHZmi:
+  case X86::VRNDSCALESHZmi_Int:
   case X86::VSQRTSHZr:
   case X86::VSQRTSHZr_Int:
   case X86::VSQRTSHZrb_Int:
@@ -7790,9 +7790,9 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::VREDUCESSZrri:
     case X86::VREDUCESSZrrik:
     case X86::VREDUCESSZrrikz:
-    case X86::VRNDSCALESSZr_Int:
-    case X86::VRNDSCALESSZr_Intk:
-    case X86::VRNDSCALESSZr_Intkz:
+    case X86::VRNDSCALESSZri_Int:
+    case X86::VRNDSCALESSZri_Intk:
+    case X86::VRNDSCALESSZri_Intkz:
     case X86::VRSQRT14SSZrr:
     case X86::VRSQRT14SSZrrk:
     case X86::VRSQRT14SSZrrkz:
@@ -7959,9 +7959,9 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
     case X86::VREDUCESDZrri:
     case X86::VREDUCESDZrrik:
     case X86::VREDUCESDZrrikz:
-    case X86::VRNDSCALESDZr_Int:
-    case X86::VRNDSCALESDZr_Intk:
-    case X86::VRNDSCALESDZr_Intkz:
+    case X86::VRNDSCALESDZri_Int:
+    case X86::VRNDSCALESDZri_Intk:
+    case X86::VRNDSCALESDZri_Intkz:
     case X86::VRSQRT14SDZrr:
     case X86::VRSQRT14SDZrrk:
     case X86::VRSQRT14SDZrrkz:
diff --git a/llvm/lib/Target/X86/X86SchedSapphireRapids.td b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
index 4344a48a526281..448f7f20dedffd 100644
--- a/llvm/lib/Target/X86/X86SchedSapphireRapids.td
+++ b/llvm/lib/Target/X86/X86SchedSapphireRapids.td
@@ -2300,8 +2300,8 @@ def : InstRW<[SPRWriteResGroup218, ReadAfterVecXLd], (instregex "^(V?)ROUNDS(D|S
                                                                 "^VRNDSCALEP(D|S)Z128rm(bi|ik)$",
                                                                 "^VRNDSCALEP(D|S)Z128rmbik(z?)$",
                                                                 "^VRNDSCALEP(D|S)Z128rmi((kz)?)$",
-                                                                "^VRNDSCALES(D|S)Zm$",
-                                                                "^VRNDSCALES(D|S)Zm_Int((k|kz)?)$")>;
+                                                                "^VRNDSCALES(D|S)Zmi$",
+                                                                "^VRNDSCALES(D|S)Zmi_Int((k|kz)?)$")>;
 
 def SPRWriteResGroup219 : SchedWriteRes<[SPRPort00_01]> {
   let ReleaseAtCycles = [2];
@@ -2312,8 +2312,8 @@ def : InstRW<[SPRWriteResGroup219], (instregex "^(V?)ROUND(PD|SS)ri$",
                                                "^(V?)ROUND(PS|SD)ri$",
                                                "^(V?)ROUNDS(D|S)ri_Int$",
                                                "^VRNDSCALEP(D|S)Z(128|256)rri((k|kz)?)$",
-                                               "^VRNDSCALES(D|S)Zr$",
-                                               "^VRNDSCALES(D|S)Zr(b?)_Int((k|kz)?)$",
+                                               "^VRNDSCALES(D|S)Zri$",
+                                               "^VRNDSCALES(D|S)Zri(b?)_Int((k|kz)?)$",
                                                "^VROUNDP(D|S)Yri$")>;
 
 def SPRWriteResGroup220 : SchedWriteRes<[SPRPort00_06]> {
@@ -3747,7 +3747,7 @@ def : InstRW<[SPRWriteResGroup390, ReadAfterVecXLd], (instregex "^VF(C?)MADDCSHZ
                                                                 "^VF(C?)MULCPHZ128rm(b?)$",
                                                                 "^VF(C?)MULCSHZrm$",
                                                                 "^VRNDSCALEPHZ128rm(b?)i$",
-                                                                "^VRNDSCALESHZm((_Int)?)$",
+                                                                "^VRNDSCALESHZmi((_Int)?)$",
                                                                 "^VSCALEFPHZ128rm(b?)$")>;
 def : InstRW<[SPRWriteResGroup390, ReadAfterVecYLd], (instregex "^VF(C?)MULCPHZ256rm(b?)$",
                                                                 "^VRNDSCALEP(D|H|S)Z256rm(b?)i$",
@@ -3779,9 +3779,9 @@ def : InstRW<[SPRWriteResGroup392], (instregex "^VF(C?)MADDCPHZ(128|256)r$",
                                                "^VF(C?)MULCPHZ(128|256)rr$",
                                                "^VF(C?)MULCSHZrr(b?)$",
                                                "^VRNDSCALEPHZ(128|256)rri$",
-                                               "^VRNDSCALESHZr(b?)_Int$",
+                                               "^VRNDSCALESHZri(b?)_Int$",
                                                "^VSCALEFPHZ(128|256)rr$")>;
-def : InstRW<[SPRWriteResGroup392], (instrs VRNDSCALESHZr,
+def : InstRW<[SPRWriteResGroup392], (instrs VRNDSCALESHZri,
                                             VSCALEFSHZrr,
                                             VSCALEFSHZrrb_Int)>;
 
@@ -4884,7 +4884,7 @@ def SPRWriteResGroup534 : SchedWriteRes<[SPRPort00_01, SPRPort02_03_11]> {
   let NumMicroOps = 3;
 }
 def : InstRW<[SPRWriteResGroup534, ReadAfterVecXLd], (instregex "^VRNDSCALEPHZ128rm(b?)ik(z?)$",
-                                                                "^VRNDSCALESHZm_Intk(z?)$",
+                                                                "^VRNDSCALESHZmi_Intk(z?)$",
                                                                 "^VSCALEFPHZ128rm(bk|kz)$",
                                                                 "^VSCALEFPHZ128rm(k|bkz)$")>;
 def : InstRW<[SPRWriteResGroup534, ReadAfterVecYLd], (instregex "^VRNDSCALEPHZ256rm(b?)ik(z?)$",
@@ -4898,7 +4898,7 @@ def SPRWriteResGroup535 : SchedWriteRes<[SPRPort00_01]> {
   let NumMicroOps = 2;
 }
 def : InstRW<[SPRWriteResGroup535], (instregex "^VRNDSCALEPHZ(128|256)rrik(z?)$",
-                                               "^VRNDSCALESHZr(b?)_Intk(z?)$",
+                                               "^VRNDSCALESHZri(b?)_Intk(z?)$",
                                                "^VSCALEFPHZ(128|256)rrk(z?)$",
                                                "^VSCALEFSHZrrb_Intk(z?)$",
                                                "^VSCALEFSHZrrk(z?)$")>;
diff --git a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
index 2f587d789779cf..d5df85c490b7c1 100644
--- a/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
+++ b/llvm/test/CodeGen/X86/evex-to-vex-compress.mir
@@ -2309,21 +2309,21 @@ body: |
   ; CHECK: $xmm0 = VINSERTPSrri                $xmm0, $xmm0, 1
   $xmm0 = VINSERTPSZrri                        $xmm0, $xmm0, 1
   ; CHECK: $xmm0 = VROUNDSDmi                  $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  $xmm0 = VRNDSCALESDZm                        $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  $xmm0 = VRNDSCALESDZmi                       $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   ; CHECK: $xmm0 = VROUNDSDri                  $xmm0, $xmm1, 15, implicit $mxcsr
-  $xmm0 = VRNDSCALESDZr                        $xmm0, $xmm1, 15, implicit $mxcsr
+  $xmm0 = VRNDSCALESDZri                       $xmm0, $xmm1, 15, implicit $mxcsr
   ; CHECK: $xmm0 = VROUNDSSmi                  $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  $xmm0 = VRNDSCALESSZm                        $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  $xmm0 = VRNDSCALESSZmi                       $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   ; CHECK: $xmm0 = VROUNDSSri                  $xmm0, $xmm1, 15, implicit $mxcsr
-  $xmm0 = VRNDSCALESSZr                        $xmm0, $xmm1, 15, implicit $mxcsr
+  $xmm0 = VRNDSCALESSZri                       $xmm0, $xmm1, 15, implicit $mxcsr
   ; CHECK: $xmm0 = VROUNDSDmi_Int              $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  $xmm0 = VRNDSCALESDZm_Int                    $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  $xmm0 = VRNDSCALESDZmi_Int                   $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   ; CHECK: $xmm0 = VROUNDSDri_Int              $xmm0, $xmm1, 15, implicit $mxcsr
-  $xmm0 = VRNDSCALESDZr_Int                    $xmm0, $xmm1, 15, implicit $mxcsr
+  $xmm0 = VRNDSCALESDZri_Int                   $xmm0, $xmm1, 15, implicit $mxcsr
   ; CHECK: $xmm0 = VROUNDSSmi_Int              $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  $xmm0 = VRNDSCALESSZm_Int                    $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  $xmm0 = VRNDSCALESSZmi_Int                   $xmm0, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
   ; CHECK: $xmm0 = VROUNDSSri_Int              $xmm0, $xmm1, 15, implicit $mxcsr
-  $xmm0 = VRNDSCALESSZr_Int                    $xmm0, $xmm1, 15, implicit $mxcsr
+  $xmm0 = VRNDSCALESSZri_Int                   $xmm0, $xmm1, 15, implicit $mxcsr
 
   RET64
 ...
@@ -4636,38 +4636,38 @@ body: |
   VUCOMISSZrm                                  $xmm16, $rdi, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit $mxcsr
   ; CHECK: VUCOMISSZrr                         $xmm16, $xmm1, implicit-def $eflags, implicit $mxcsr
   VUCOMISSZrr                                  $xmm16, $xmm1, implicit-def $eflags, implicit $mxcsr
-  ; CHECK: $xmm16 = VRNDSCALESDZm              $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  $xmm16 = VRNDSCALESDZm                       $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  ; CHECK: $xmm16 = VRNDSCALESDZr              $xmm16, $xmm1, 15, implicit $mxcsr
-  $xmm16 = VRNDSCALESDZr                       $xmm16, $xmm1, 15, implicit $mxcsr
-  ; CHECK: $xmm16 = VRNDSCALESSZm              $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  $xmm16 = VRNDSCALESSZm                       $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  ; CHECK: $xmm16 = VRNDSCALESSZr              $xmm16, $xmm1, 15, implicit $mxcsr
-  $xmm16 = VRNDSCALESSZr                       $xmm16, $xmm1, 15, implicit $mxcsr
-  ; CHECK: $xmm16 = VRNDSCALESDZm_Int          $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  $xmm16 = VRNDSCALESDZm_Int                   $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  ; CHECK: $xmm16 = VRNDSCALESDZr_Int          $xmm16, $xmm1, 15, implicit $mxcsr
-  $xmm16 = VRNDSCALESDZr_Int                   $xmm16, $xmm1, 15, implicit $mxcsr
-  ; CHECK: $xmm16 = VRNDSCALESSZm_Int          $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  $xmm16 = VRNDSCALESSZm_Int                   $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
-  ; CHECK: $xmm16 = VRNDSCALESSZr_Int          $xmm16, $xmm1, 15, implicit $mxcsr
-  $xmm16 = VRNDSCALESSZr_Int                   $xmm16, $xmm1, 15, implicit $mxcsr
-  ; CHECK: $xmm0 = VRNDSCALESDZm               $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
-  $xmm0 = VRNDSCALESDZm                        $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
-  ; CHECK: $xmm0 = VRNDSCALESDZr               $xmm0, $xmm1, 31, implicit $mxcsr
-  $xmm0 = VRNDSCALESDZr                        $xmm0, $xmm1, 31, implicit $mxcsr
-  ; CHECK: $xmm0 = VRNDSCALESSZm               $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
-  $xmm0 = VRNDSCALESSZm                        $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
-  ; CHECK: $xmm0 = VRNDSCALESSZr               $xmm0, $xmm1, 31, implicit $mxcsr
-  $xmm0 = VRNDSCALESSZr                        $xmm0, $xmm1, 31, implicit $mxcsr
-  ; CHECK: $xmm0 = VRNDSCALESDZm_Int           $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
-  $xmm0 = VRNDSCALESDZm_Int                    $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
-  ; CHECK: $xmm0 = VRNDSCALESDZr_Int           $xmm0, $xmm1, 31, implicit $mxcsr
-  $xmm0 = VRNDSCALESDZr_Int                    $xmm0, $xmm1, 31, implicit $mxcsr
-  ; CHECK: $xmm0 = VRNDSCALESSZm_Int           $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
-  $xmm0 = VRNDSCALESSZm_Int                    $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
-  ; CHECK: $xmm0 = VRNDSCALESSZr_Int           $xmm0, $xmm1, 31, implicit $mxcsr
-  $xmm0 = VRNDSCALESSZr_Int                    $xmm0, $xmm1, 31, implicit $mxcsr
+  ; CHECK: $xmm16 = VRNDSCALESDZmi             $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  $xmm16 = VRNDSCALESDZmi                      $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  ; CHECK: $xmm16 = VRNDSCALESDZri             $xmm16, $xmm1, 15, implicit $mxcsr
+  $xmm16 = VRNDSCALESDZri                      $xmm16, $xmm1, 15, implicit $mxcsr
+  ; CHECK: $xmm16 = VRNDSCALESSZmi             $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  $xmm16 = VRNDSCALESSZmi                      $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  ; CHECK: $xmm16 = VRNDSCALESSZri             $xmm16, $xmm1, 15, implicit $mxcsr
+  $xmm16 = VRNDSCALESSZri                      $xmm16, $xmm1, 15, implicit $mxcsr
+  ; CHECK: $xmm16 = VRNDSCALESDZmi_Int         $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  $xmm16 = VRNDSCALESDZmi_Int                  $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  ; CHECK: $xmm16 = VRNDSCALESDZri_Int         $xmm16, $xmm1, 15, implicit $mxcsr
+  $xmm16 = VRNDSCALESDZri_Int                  $xmm16, $xmm1, 15, implicit $mxcsr
+  ; CHECK: $xmm16 = VRNDSCALESSZmi_Int         $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  $xmm16 = VRNDSCALESSZmi_Int                  $xmm16, $rip, 1, $noreg, 0, $noreg, 15, implicit $mxcsr
+  ; CHECK: $xmm16 = VRNDSCALESSZri_Int         $xmm16, $xmm1, 15, implicit $mxcsr
+  $xmm16 = VRNDSCALESSZri_Int                  $xmm16, $xmm1, 15, implicit $mxcsr
+  ; CHECK: $xmm0 = VRNDSCALESDZmi              $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+  $xmm0 = VRNDSCALESDZmi                       $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+  ; CHECK: $xmm0 = VRNDSCALESDZri              $xmm0, $xmm1, 31, implicit $mxcsr
+  $xmm0 = VRNDSCALESDZri                       $xmm0, $xmm1, 31, implicit $mxcsr
+  ; CHECK: $xmm0 = VRNDSCALESSZmi              $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+  $xmm0 = VRNDSCALESSZmi                       $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxcsr
+  ; CHECK: $xmm0 = VRNDSCALESSZri              $xmm0, $xmm1, 31, implicit $mxcsr
+  $xmm0 = VRNDSCALESSZri                       $xmm0, $xmm1, 31, implicit $mxcsr
+  ; CHECK: $xmm0 = VRNDSCALESDZmi_Int          $xmm0, $rip, 1, $noreg, 0, $noreg, 31, implicit $mxc...
[truncated]
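
As background for the X86CompressEVEX.cpp hunk and the evex-to-vex-compress.mir test above: VRNDSCALE's 8-bit immediate encodes the rounding control, precision-exception suppression, and imm/MXCSR selection in bits 3:0 - the same meaning those bits have in the legacy VROUND encoding - but additionally carries a fixed-point scale field in bits 7:4 that VROUND cannot express. The EVEX scalar form can therefore only be compressed to VROUNDS[DS] when bits 7:4 are zero, which is what the "only bits 3:0 of the immediate are used" check guards. A minimal standalone sketch of that condition (hypothetical helper name; the in-tree check lives in performCustomAdjustments):

```cpp
// Sketch (not the in-tree code): the immediate test that gates compressing a
// scalar EVEX VRNDSCALE into the shorter VEX VROUND encoding.
#include <cstdint>

static bool rndScaleImmFitsVRound(int64_t ImmVal) {
  // Bits 3:0 (rounding control / exception suppression) mean the same thing
  // in both encodings; the VRNDSCALE-only scale field in bits 7:4 must be 0.
  return (ImmVal & 0xF) == ImmVal;
}
```

This is also why, in the MIR test, the $xmm0 cases with immediate 15 are rewritten to VROUNDS* opcodes while the $xmm0 cases with immediate 31 (bit 4 set) keep their VRNDSCALES* names; the $xmm16 cases stay EVEX regardless, since xmm16-xmm31 are not encodable in VEX.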

Inline review comment (quoted diff context):

case X86::VRNDSCALESDZm_Int:
case X86::VRNDSCALESSZr_Int:
case X86::VRNDSCALESSZm_Int:
case X86::VRNDSCALESDZri:
A reviewer (Contributor) commented:
It's inconsistent with the vector instructions, which use rri. No idea which one is precise, though.

@RKSimon (Collaborator, Author) replied:

Yes, a lot of the scalar variants don't do this consistently, especially when they are unary - so far it's something I've been managing to avoid. But rndscale is easier to fix than others (which share more tablegen macrodefs) - I'll see what I can do.

@phoebewang (Contributor) left a comment:

LGTM.

@RKSimon RKSimon changed the title from "[X86] Add missing immediate arg tag to VRNDSCALES instruction names" to "[X86] Add missing reg/imm attributes to VRNDSCALES instruction names" on Nov 22, 2024
@RKSimon RKSimon merged commit 29f11f0 into llvm:main Nov 22, 2024
8 checks passed
@RKSimon RKSimon deleted the x86-vrndscales-imm branch November 22, 2024 17:45