Skip to content

AtomicExpand: Regenerate baseline checks #103063

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Aug 13, 2024

Conversation

arsenm
Copy link
Contributor

@arsenm arsenm commented Aug 13, 2024

Avoid spurious test diffs in a future commit.

Copy link
Contributor Author

arsenm commented Aug 13, 2024

This stack of pull requests is managed by Graphite. Learn more about stacking.

Join @arsenm and the rest of your teammates on Graphite

@llvmbot
Copy link
Member

llvmbot commented Aug 13, 2024

@llvm/pr-subscribers-backend-powerpc
@llvm/pr-subscribers-llvm-transforms

@llvm/pr-subscribers-backend-amdgpu

Author: Matt Arsenault (arsenm)

Changes

Avoid spurious test diffs in a future commit.


Patch is 381.16 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/103063.diff

14 Files Affected:

  • (modified) llvm/lib/Target/AMDGPU/SIISelLowering.cpp (+13-6)
  • (modified) llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll (+16-16)
  • (modified) llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll (+28-28)
  • (modified) llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll (+1368-1368)
  • (modified) llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-mmra.ll (+2)
  • (modified) llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd-flat-specialization.ll (+22-14)
  • (modified) llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll (+6-4)
  • (modified) llvm/test/Transforms/AtomicExpand/ARM/atomicrmw-fp.ll (+16-16)
  • (modified) llvm/test/Transforms/AtomicExpand/Hexagon/atomicrmw-fp.ll (+16-16)
  • (modified) llvm/test/Transforms/AtomicExpand/Mips/atomicrmw-fp.ll (+16-16)
  • (modified) llvm/test/Transforms/AtomicExpand/PowerPC/atomicrmw-fp.ll (+16-16)
  • (modified) llvm/test/Transforms/AtomicExpand/RISCV/atomicrmw-fp.ll (+14-14)
  • (modified) llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll (+40-40)
  • (modified) llvm/test/Transforms/AtomicExpand/X86/expand-atomic-xchg-fp.ll (+14-14)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1cf9fb7a3724b7..261e5fcb2d467f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16647,6 +16647,9 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
   IRBuilder<> Builder(AI);
   LLVMContext &Ctx = Builder.getContext();
 
+  // If the return value isn't used, do not introduce a false use in the phi.
+  bool ReturnValueIsUsed = !AI->use_empty();
+
   BasicBlock *BB = Builder.GetInsertBlock();
   Function *F = BB->getParent();
   BasicBlock *ExitBB =
@@ -16710,14 +16713,18 @@ void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
   Builder.CreateBr(PhiBB);
 
   Builder.SetInsertPoint(PhiBB);
-  PHINode *Loaded = Builder.CreatePHI(ValTy, 3);
-  Loaded->addIncoming(LoadedShared, SharedBB);
-  Loaded->addIncoming(LoadedPrivate, PrivateBB);
-  Loaded->addIncoming(LoadedGlobal, GlobalBB);
+
+  if (ReturnValueIsUsed) {
+    PHINode *Loaded = Builder.CreatePHI(ValTy, 3);
+    Loaded->addIncoming(LoadedShared, SharedBB);
+    Loaded->addIncoming(LoadedPrivate, PrivateBB);
+    Loaded->addIncoming(LoadedGlobal, GlobalBB);
+    Loaded->takeName(AI);
+    AI->replaceAllUsesWith(Loaded);
+  }
+
   Builder.CreateBr(ExitBB);
 
-  Loaded->takeName(AI);
-  AI->replaceAllUsesWith(Loaded);
   AI->eraseFromParent();
 }
 
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
index ba6802f85c03cd..fa2ad60db7c291 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/atomicrmw-fp.ll
@@ -6,17 +6,17 @@ define float @test_atomicrmw_fadd_f32(ptr %ptr, float %value) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
 ; CHECK-NEXT:    [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
-; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
-; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[TMP3]], i32 [[TMP2]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT:    [[TMP5]] = bitcast i32 [[NEWLOADED]] to float
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
 ; CHECK:       atomicrmw.end:
-; CHECK-NEXT:    ret float [[TMP6]]
+; CHECK-NEXT:    ret float [[TMP5]]
 ;
   %res = atomicrmw fadd ptr %ptr, float %value seq_cst
   ret float %res
@@ -27,17 +27,17 @@ define float @test_atomicrmw_fsub_f32(ptr %ptr, float %value) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[PTR:%.*]], align 4
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT:    [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP5:%.*]], [[ATOMICRMW_START]] ]
 ; CHECK-NEXT:    [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float [[NEW]] to i32
-; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = cmpxchg ptr [[PTR]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
-; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
-; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
-; CHECK-NEXT:    [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = cmpxchg ptr [[PTR]], i32 [[TMP3]], i32 [[TMP2]] seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP4]], 1
+; CHECK-NEXT:    [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP4]], 0
+; CHECK-NEXT:    [[TMP5]] = bitcast i32 [[NEWLOADED]] to float
 ; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
 ; CHECK:       atomicrmw.end:
-; CHECK-NEXT:    ret float [[TMP6]]
+; CHECK-NEXT:    ret float [[TMP5]]
 ;
   %res = atomicrmw fsub ptr %ptr, float %value seq_cst
   ret float %res
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
index ef2b5fe3672be0..95a52aa0f7f527 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
@@ -4,23 +4,23 @@
 
 define void @atomic_swap_f16(ptr %ptr, half %val) nounwind {
 ; CHECK-LABEL: @atomic_swap_f16(
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast half [[VAL:%.*]] to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL:%.*]] to i16
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i16) [[PTR:%.*]])
-; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[TMP3]] to i16
-; CHECK-NEXT:    [[TMP5:%.*]] = zext i16 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[TMP5]], ptr elementtype(i16) [[PTR]])
-; CHECK-NEXT:    [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP6]], 0
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i16) [[PTR:%.*]])
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP2]] to i16
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i16 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[TMP4]], ptr elementtype(i16) [[PTR]])
+; CHECK-NEXT:    [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
 ; CHECK:       atomicrmw.end:
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i16 [[TMP4]] to half
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16 [[TMP3]] to half
 ; CHECK-NEXT:    ret void
 ;
 ; OUTLINE-ATOMICS-LABEL: @atomic_swap_f16(
-; OUTLINE-ATOMICS-NEXT:    [[TMP2:%.*]] = bitcast half [[VAL:%.*]] to i16
-; OUTLINE-ATOMICS-NEXT:    [[TMP3:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[TMP2]] acquire, align 2
-; OUTLINE-ATOMICS-NEXT:    [[TMP4:%.*]] = bitcast i16 [[TMP3]] to half
+; OUTLINE-ATOMICS-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL:%.*]] to i16
+; OUTLINE-ATOMICS-NEXT:    [[TMP2:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[TMP1]] acquire, align 2
+; OUTLINE-ATOMICS-NEXT:    [[TMP3:%.*]] = bitcast i16 [[TMP2]] to half
 ; OUTLINE-ATOMICS-NEXT:    ret void
 ;
   %t1 = atomicrmw xchg ptr %ptr, half %val acquire
@@ -29,23 +29,23 @@ define void @atomic_swap_f16(ptr %ptr, half %val) nounwind {
 
 define void @atomic_swap_f32(ptr %ptr, float %val) nounwind {
 ; CHECK-LABEL: @atomic_swap_f32(
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float [[VAL:%.*]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL:%.*]] to i32
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32) [[PTR:%.*]])
-; CHECK-NEXT:    [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; CHECK-NEXT:    [[TMP5:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[TMP5]], ptr elementtype(i32) [[PTR]])
-; CHECK-NEXT:    [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP6]], 0
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i32) [[PTR:%.*]])
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP5:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[TMP4]], ptr elementtype(i32) [[PTR]])
+; CHECK-NEXT:    [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP5]], 0
 ; CHECK-NEXT:    br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
 ; CHECK:       atomicrmw.end:
-; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32 [[TMP4]] to float
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32 [[TMP3]] to float
 ; CHECK-NEXT:    ret void
 ;
 ; OUTLINE-ATOMICS-LABEL: @atomic_swap_f32(
-; OUTLINE-ATOMICS-NEXT:    [[TMP2:%.*]] = bitcast float [[VAL:%.*]] to i32
-; OUTLINE-ATOMICS-NEXT:    [[TMP3:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i32 [[TMP2]] acquire, align 4
-; OUTLINE-ATOMICS-NEXT:    [[TMP4:%.*]] = bitcast i32 [[TMP3]] to float
+; OUTLINE-ATOMICS-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL:%.*]] to i32
+; OUTLINE-ATOMICS-NEXT:    [[TMP2:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i32 [[TMP1]] acquire, align 4
+; OUTLINE-ATOMICS-NEXT:    [[TMP3:%.*]] = bitcast i32 [[TMP2]] to float
 ; OUTLINE-ATOMICS-NEXT:    ret void
 ;
   %t1 = atomicrmw xchg ptr %ptr, float %val acquire
@@ -54,21 +54,21 @@ define void @atomic_swap_f32(ptr %ptr, float %val) nounwind {
 
 define void @atomic_swap_f64(ptr %ptr, double %val) nounwind {
 ; CHECK-LABEL: @atomic_swap_f64(
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double [[VAL:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL:%.*]] to i64
 ; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i64) [[PTR:%.*]])
-; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[TMP2]], ptr elementtype(i64) [[PTR]])
-; CHECK-NEXT:    [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP4]], 0
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.aarch64.ldaxr.p0(ptr elementtype(i64) [[PTR:%.*]])
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.aarch64.stxr.p0(i64 [[TMP1]], ptr elementtype(i64) [[PTR]])
+; CHECK-NEXT:    [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP3]], 0
 ; CHECK-NEXT:    br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
 ; CHECK:       atomicrmw.end:
-; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i64 [[TMP3]] to double
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i64 [[TMP2]] to double
 ; CHECK-NEXT:    ret void
 ;
 ; OUTLINE-ATOMICS-LABEL: @atomic_swap_f64(
-; OUTLINE-ATOMICS-NEXT:    [[TMP2:%.*]] = bitcast double [[VAL:%.*]] to i64
-; OUTLINE-ATOMICS-NEXT:    [[TMP3:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[TMP2]] acquire, align 8
-; OUTLINE-ATOMICS-NEXT:    [[TMP4:%.*]] = bitcast i64 [[TMP3]] to double
+; OUTLINE-ATOMICS-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL:%.*]] to i64
+; OUTLINE-ATOMICS-NEXT:    [[TMP2:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[TMP1]] acquire, align 8
+; OUTLINE-ATOMICS-NEXT:    [[TMP3:%.*]] = bitcast i64 [[TMP2]] to double
 ; OUTLINE-ATOMICS-NEXT:    ret void
 ;
   %t1 = atomicrmw xchg ptr %ptr, double %val acquire
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
index cc42407c0210e7..c5c890559152df 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/pcsections.ll
@@ -4,7 +4,7 @@
 define i8 @atomic8_load_unordered(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_load_unordered(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[A:%.*]] unordered, align 1, !pcsections !0
+; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[A:%.*]] unordered, align 1, !pcsections [[META0:![0-9]+]]
 ; CHECK-NEXT:    ret i8 [[TMP0]]
 ;
 entry:
@@ -15,7 +15,7 @@ entry:
 define i8 @atomic8_load_monotonic(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_load_monotonic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[A:%.*]] monotonic, align 1, !pcsections !0
+; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[A:%.*]] monotonic, align 1, !pcsections [[META0]]
 ; CHECK-NEXT:    ret i8 [[TMP0]]
 ;
 entry:
@@ -26,7 +26,7 @@ entry:
 define i8 @atomic8_load_acquire(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_load_acquire(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[A:%.*]] acquire, align 1, !pcsections !0
+; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[A:%.*]] acquire, align 1, !pcsections [[META0]]
 ; CHECK-NEXT:    ret i8 [[TMP0]]
 ;
 entry:
@@ -37,7 +37,7 @@ entry:
 define i8 @atomic8_load_seq_cst(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_load_seq_cst(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[A:%.*]] seq_cst, align 1, !pcsections !0
+; CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[A:%.*]] seq_cst, align 1, !pcsections [[META0]]
 ; CHECK-NEXT:    ret i8 [[TMP0]]
 ;
 entry:
@@ -48,7 +48,7 @@ entry:
 define void @atomic8_store_unordered(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_store_unordered(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store atomic i8 0, ptr [[A:%.*]] unordered, align 1, !pcsections !0
+; CHECK-NEXT:    store atomic i8 0, ptr [[A:%.*]] unordered, align 1, !pcsections [[META0]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -59,7 +59,7 @@ entry:
 define void @atomic8_store_monotonic(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_store_monotonic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store atomic i8 0, ptr [[A:%.*]] monotonic, align 1, !pcsections !0
+; CHECK-NEXT:    store atomic i8 0, ptr [[A:%.*]] monotonic, align 1, !pcsections [[META0]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -70,7 +70,7 @@ entry:
 define void @atomic8_store_release(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_store_release(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store atomic i8 0, ptr [[A:%.*]] release, align 1, !pcsections !0
+; CHECK-NEXT:    store atomic i8 0, ptr [[A:%.*]] release, align 1, !pcsections [[META0]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -81,7 +81,7 @@ entry:
 define void @atomic8_store_seq_cst(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_store_seq_cst(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    store atomic i8 0, ptr [[A:%.*]] seq_cst, align 1, !pcsections !0
+; CHECK-NEXT:    store atomic i8 0, ptr [[A:%.*]] seq_cst, align 1, !pcsections [[META0]]
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -92,14 +92,14 @@ entry:
 define void @atomic8_xchg_monotonic(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_xchg_monotonic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1, !pcsections !0
-; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]], !pcsections !0
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1, !pcsections [[META0]]
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]], !pcsections [[META0]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[LOADED:%.*]] = phi i8 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ], !pcsections !0
-; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[A]], i8 [[LOADED]], i8 0 monotonic monotonic, align 1, !pcsections !0
-; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1, !pcsections !0
-; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP1]], 0, !pcsections !0
-; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]], !pcsections !0
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i8 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ], !pcsections [[META0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[A]], i8 [[LOADED]], i8 0 monotonic monotonic, align 1, !pcsections [[META0]]
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1, !pcsections [[META0]]
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP1]], 0, !pcsections [[META0]]
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]], !pcsections [[META0]]
 ; CHECK:       atomicrmw.end:
 ; CHECK-NEXT:    ret void
 ;
@@ -111,14 +111,14 @@ entry:
 define void @atomic8_add_monotonic(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_add_monotonic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1, !pcsections !0
-; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]], !pcsections !0
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1, !pcsections [[META0]]
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]], !pcsections [[META0]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[LOADED:%.*]] = phi i8 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ], !pcsections !0
-; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[A]], i8 [[LOADED]], i8 [[LOADED]] monotonic monotonic, align 1, !pcsections !0
-; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1, !pcsections !0
-; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP1]], 0, !pcsections !0
-; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]], !pcsections !0
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i8 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ], !pcsections [[META0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[A]], i8 [[LOADED]], i8 [[LOADED]] monotonic monotonic, align 1, !pcsections [[META0]]
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1, !pcsections [[META0]]
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP1]], 0, !pcsections [[META0]]
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]], !pcsections [[META0]]
 ; CHECK:       atomicrmw.end:
 ; CHECK-NEXT:    ret void
 ;
@@ -130,14 +130,14 @@ entry:
 define void @atomic8_sub_monotonic(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_sub_monotonic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1, !pcsections !0
-; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]], !pcsections !0
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1, !pcsections [[META0]]
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]], !pcsections [[META0]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[LOADED:%.*]] = phi i8 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ], !pcsections !0
-; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[A]], i8 [[LOADED]], i8 [[LOADED]] monotonic monotonic, align 1, !pcsections !0
-; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1, !pcsections !0
-; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP1]], 0, !pcsections !0
-; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]], !pcsections !0
+; CHECK-NEXT:    [[LOADED:%.*]] = phi i8 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ], !pcsections [[META0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[A]], i8 [[LOADED]], i8 [[LOADED]] monotonic monotonic, align 1, !pcsections [[META0]]
+; CHECK-NEXT:    [[SUCCESS:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1, !pcsections [[META0]]
+; CHECK-NEXT:    [[NEWLOADED]] = extractvalue { i8, i1 } [[TMP1]], 0, !pcsections [[META0]]
+; CHECK-NEXT:    br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]], !pcsections [[META0]]
 ; CHECK:       atomicrmw.end:
 ; CHECK-NEXT:    ret void
 ;
@@ -149,14 +149,14 @@ entry:
 define void @atomic8_and_monotonic(ptr %a) nounwind uwtable {
 ; CHECK-LABEL: @atomic8_and_monotonic(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1, !pcsections !0
-; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]], !pcsections !0
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[A:%.*]], align 1, !pcsections [[META0]]
+; CHECK-NEXT:    br label [[ATOMICRMW_START:%.*]], !pcsections [[META0]]
 ; CHECK:       atomicrmw.start:
-; CHECK-NEXT:    [[LOADED:%.*]] = phi i8 [ [[TMP0]], [[ENTRY:%.*]] ], [ [[NEWLOADED:%.*]], [[ATOMICRMW_START]] ], !pcsections !0
-; CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[A]], i8 [[LOADED]], i8 0 monotonic monotonic, ali...
[truncated]

Avoid spurious test diffs in a future commit.
@arsenm arsenm force-pushed the users/arsenm/atomic-expand-regenerate-baseline-checks branch from 018cef7 to c6d804a Compare August 13, 2024 15:58
@arsenm arsenm merged commit 80c51fa into main Aug 13, 2024
6 of 7 checks passed
@arsenm arsenm deleted the users/arsenm/atomic-expand-regenerate-baseline-checks branch August 13, 2024 16:43
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Projects
None yet
Development

Successfully merging this pull request may close these issues.

2 participants