[X86] Add lowering tests for promoted CMPCCXADD and update CC representation #78685

Merged
merged 1 commit into llvm:main on Jan 22, 2024

Conversation

@XinWang10 (Contributor) commented Jan 19, 2024

#76125 added encoding/decoding support for the CMPCCXADD instructions. This patch:

  1. Adds lowering tests for promoted CMPCCXADD.
  2. Updates the representation of the condition code for promoted CMPCCXADD to align with the existing (VEX) one; see the condition-code table sketched below.
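
For orientation before the diff, here is a sketch (not part of the patch; the array name is hypothetical) of the mapping between the condition-code immediate passed to the llvm.x86.cmpccxadd32/64 intrinsics and the mnemonic suffix that gets printed. Entries 0-13 are taken from the tests in this patch; 14 and 15 ("le", "nle") are assumed from the CMPccXADD condition encoding, since the diff below is truncated before reaching them.

// Hypothetical lookup table, index = condition-code immediate of the
// llvm.x86.cmpccxadd32/64 intrinsic call.
// For example, cc == 5 selects "nz", printed as cmpnzxadd.
static const char *const CmpccxaddSuffixes[16] = {
    "o", "no", "b",  "nb", "z", "nz", "be", "nbe",
    "s", "ns", "p",  "np", "l", "nl", "le", "nle"};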

@XinWang10 requested a review from FreddyLeaf on January 19, 2024 at 09:05
@llvmbot added the backend:X86 and mc (Machine (object) code) labels Jan 19, 2024
@llvmbot (Member) commented Jan 19, 2024

@llvm/pr-subscribers-mc

@llvm/pr-subscribers-backend-x86

Author: None (XinWang10)

Changes

#76125 added encoding/decoding support for the CMPCCXADD instructions. This patch:

  1. Adds lowering tests for CMPCCXADD.
  2. Fixes a mnemonic-printing error: CMPCCXADD uses special mnemonics that differ from the generic condition-code names, so a handler for the promoted instructions is added in X86InstPrinterCommon.cpp (a sketch of the flavor selection follows the first hunk below).

Patch is 42.50 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/78685.diff

5 Files Affected:

  • (modified) llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp (+3-1)
  • (modified) llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll (+193)
  • (modified) llvm/test/MC/Disassembler/X86/apx/cmpccxadd.txt (+40-40)
  • (modified) llvm/test/MC/X86/apx/cmpccxadd-att.s (+40-40)
  • (modified) llvm/test/MC/X86/apx/cmpccxadd-intel.s (+40-40)
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
index 1947313a9dfb0b..e519c00a21109a 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86InstPrinterCommon.cpp
@@ -30,7 +30,9 @@ void X86InstPrinterCommon::printCondCode(const MCInst *MI, unsigned Op,
                                          raw_ostream &O) {
   int64_t Imm = MI->getOperand(Op).getImm();
   bool Flavor = MI->getOpcode() == X86::CMPCCXADDmr32 ||
-                MI->getOpcode() == X86::CMPCCXADDmr64;
+                MI->getOpcode() == X86::CMPCCXADDmr64 ||
+                MI->getOpcode() == X86::CMPCCXADDmr32_EVEX ||
+                MI->getOpcode() == X86::CMPCCXADDmr64_EVEX;
   switch (Imm) {
   default: llvm_unreachable("Invalid condcode argument!");
   case    0: O << "o";  break;
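
(Sketch, not part of the patch: the remaining cases of this switch are truncated from the diff. Based on the visible function shape and the mnemonics the tests below expect, the Flavor flag plausibly selects the CMPCCXADD spelling wherever it differs from the generic condition-code name:)

#include "llvm/Support/raw_ostream.h"

// Flavor-dependent cases only; all other condition codes ("o", "no", "b",
// "be", "s", "ns", "p", "np", "l", "le") print the same string for both the
// generic and the CMPCCXADD forms.
static void printCondCodeSketch(int64_t Imm, bool Flavor,
                                llvm::raw_ostream &O) {
  switch (Imm) {
  case 3:  O << (Flavor ? "nb"  : "ae"); break; // e.g. cmpnbxadd vs. setae
  case 4:  O << (Flavor ? "z"   : "e");  break;
  case 5:  O << (Flavor ? "nz"  : "ne"); break;
  case 7:  O << (Flavor ? "nbe" : "a");  break;
  case 13: O << (Flavor ? "nl"  : "ge"); break;
  default: break;
  }
}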
diff --git a/llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll b/llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll
index b888fb5b1ff694..f88216f95a7614 100644
--- a/llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/cmpccxadd-intrinsics.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+cmpccxadd | FileCheck %s
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+cmpccxadd,+egpr | FileCheck %s --check-prefix=EGPR
 
 define dso_local i32 @test_cmpbexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-LABEL: test_cmpbexadd32:
@@ -7,6 +8,12 @@ define dso_local i32 @test_cmpbexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpoxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe0,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpbexadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpoxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe0,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 0)
   ret i32 %0
@@ -20,6 +27,12 @@ define dso_local i64 @test_cmpbexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpoxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe0,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpbexadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpoxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe0,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 0)
   ret i64 %0
@@ -33,6 +46,12 @@ define dso_local i32 @test_cmpbxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpnoxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe1,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpbxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpnoxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe1,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 1)
   ret i32 %0
@@ -44,6 +63,12 @@ define dso_local i64 @test_cmpbxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpnoxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe1,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpbxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpnoxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe1,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 1)
   ret i64 %0
@@ -55,6 +80,12 @@ define dso_local i32 @test_cmplexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpbxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe2,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmplexadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpbxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe2,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 2)
   ret i32 %0
@@ -66,6 +97,12 @@ define dso_local i64 @test_cmplexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpbxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe2,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmplexadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpbxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe2,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 2)
   ret i64 %0
@@ -77,6 +114,12 @@ define dso_local i32 @test_cmplxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpnbxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe3,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmplxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpnbxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe3,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 3)
   ret i32 %0
@@ -88,6 +131,12 @@ define dso_local i64 @test_cmplxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpnbxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe3,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmplxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpnbxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe3,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 3)
   ret i64 %0
@@ -99,6 +148,12 @@ define dso_local i32 @test_cmpnbexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpzxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe4,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnbexadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpzxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe4,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 4)
   ret i32 %0
@@ -110,6 +165,12 @@ define dso_local i64 @test_cmpnbexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpzxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe4,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnbexadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpzxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe4,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 4)
   ret i64 %0
@@ -121,6 +182,12 @@ define dso_local i32 @test_cmpnbxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpnzxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe5,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnbxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpnzxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe5,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 5)
   ret i32 %0
@@ -132,6 +199,12 @@ define dso_local i64 @test_cmpnbxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpnzxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe5,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnbxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpnzxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe5,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 5)
   ret i64 %0
@@ -143,6 +216,12 @@ define dso_local i32 @test_cmpnlexadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpbexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe6,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnlexadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpbexadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe6,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 6)
   ret i32 %0
@@ -154,6 +233,12 @@ define dso_local i64 @test_cmpnlexadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpbexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe6,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnlexadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpbexadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe6,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 6)
   ret i64 %0
@@ -165,6 +250,12 @@ define dso_local i32 @test_cmpnlxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpnbexadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe7,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnlxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpnbexadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe7,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 7)
   ret i32 %0
@@ -176,6 +267,12 @@ define dso_local i64 @test_cmpnlxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpnbexadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe7,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnlxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpnbexadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe7,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 7)
   ret i64 %0
@@ -187,6 +284,12 @@ define dso_local i32 @test_cmpnoxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpsxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe8,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnoxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpsxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe8,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 8)
   ret i32 %0
@@ -198,6 +301,12 @@ define dso_local i64 @test_cmpnoxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpsxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe8,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnoxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpsxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe8,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 8)
   ret i64 %0
@@ -209,6 +318,12 @@ define dso_local i32 @test_cmpnpxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpnsxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xe9,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnpxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpnsxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xe9,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 9)
   ret i32 %0
@@ -220,6 +335,12 @@ define dso_local i64 @test_cmpnpxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpnsxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xe9,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnpxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpnsxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xe9,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 9)
   ret i64 %0
@@ -231,6 +352,12 @@ define dso_local i32 @test_cmpnsxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmppxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xea,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnsxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmppxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xea,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 10)
   ret i32 %0
@@ -242,6 +369,12 @@ define dso_local i64 @test_cmpnsxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmppxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xea,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnsxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmppxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xea,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 10)
   ret i64 %0
@@ -253,6 +386,12 @@ define dso_local i32 @test_cmpnzxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpnpxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xeb,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnzxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpnpxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xeb,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 11)
   ret i32 %0
@@ -264,6 +403,12 @@ define dso_local i64 @test_cmpnzxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpnpxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xeb,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpnzxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpnpxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xeb,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 11)
   ret i64 %0
@@ -275,6 +420,12 @@ define dso_local i32 @test_cmpoxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmplxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xec,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpoxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmplxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xec,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 12)
   ret i32 %0
@@ -286,6 +437,12 @@ define dso_local i64 @test_cmpoxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmplxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xec,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmpoxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmplxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xec,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 12)
   ret i64 %0
@@ -297,6 +454,12 @@ define dso_local i32 @test_cmppxadd32(ptr %__A, i32 %__B, i32 %__C) nounwind {
 ; CHECK-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
 ; CHECK-NEXT:    cmpnlxadd %edx, %eax, (%rdi) # encoding: [0xc4,0xe2,0x69,0xed,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmppxadd32:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movl %esi, %eax # encoding: [0x89,0xf0]
+; EGPR-NEXT:    cmpnlxadd %edx, %eax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x69,0xed,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i32 @llvm.x86.cmpccxadd32(ptr %__A, i32 %__B, i32 %__C, i32 13)
   ret i32 %0
@@ -308,6 +471,12 @@ define dso_local i64 @test_cmppxadd64(ptr %__A, i64 %__B, i64 %__C) nounwind {
 ; CHECK-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
 ; CHECK-NEXT:    cmpnlxadd %rdx, %rax, (%rdi) # encoding: [0xc4,0xe2,0xe9,0xed,0x07]
 ; CHECK-NEXT:    retq # encoding: [0xc3]
+;
+; EGPR-LABEL: test_cmppxadd64:
+; EGPR:       # %bb.0: # %entry
+; EGPR-NEXT:    movq %rsi, %rax # encoding: [0x48,0x89,0xf0]
+; EGPR-NEXT:    cmpnlxadd %rdx, %rax, (%rdi) # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xe9,0xed,0x07]
+; EGPR-NEXT:    retq # encoding: [0xc3]
 entry:
   %0 = tail call i64 @llvm.x86.cmpccxadd64(ptr %__A, i64 %__B, i64 %__C, i32 13)
   ret i64 %0
@@ -319,6 +488,12 @@ define dso_local i32 @test_cmpsxadd32(ptr %__A, i32 %__B, i32 %_...
[truncated]

@XinWang10 requested a review from RKSimon on January 19, 2024 at 09:06
@KanRobert changed the title from "[X86]Support APX promoted CMPCCXADD instructions" to "[X86] Add lowering tests for promoted CMPCCXADD and fix mnemonic" on Jan 20, 2024
@KanRobert changed the title from "[X86] Add lowering tests for promoted CMPCCXADD and fix mnemonic" to "[X86] Add lowering tests for promoted CMPCCXADD and update CC representation" on Jan 20, 2024
@KanRobert (Contributor) left a comment


LGTM

@XinWang10 merged commit d3cd1ce into llvm:main on Jan 22, 2024
Labels: backend:X86, mc (Machine (object) code)
3 participants