Skip to content

Commit 8e3cde0

Browse files
committed
[LoongArch][test] Add float-point atomic load/store tests. NFC
1 parent b7b945b commit 8e3cde0

File tree

1 file changed

+337
-0
lines changed

1 file changed

+337
-0
lines changed

llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll

Lines changed: 337 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -88,6 +88,50 @@ define ptr @load_acquire_ptr(ptr %ptr) {
8888
ret ptr %val
8989
}
9090

91+
; Acquire float load: plain 32-bit load into an FPR, followed by an
; acquire barrier (dbar hint 20) on both LA32 and LA64.
define float @load_acquire_float(ptr %ptr) {
; LA32-LABEL: load_acquire_float:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $a0, 0
; LA32-NEXT:    movgr2fr.w $fa0, $a0
; LA32-NEXT:    dbar 20
; LA32-NEXT:    ret
;
; LA64-LABEL: load_acquire_float:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $a0, 0
; LA64-NEXT:    movgr2fr.w $fa0, $a0
; LA64-NEXT:    dbar 20
; LA64-NEXT:    ret
  %val = load atomic float, ptr %ptr acquire, align 8
  ret float %val
}
108+
109+
; Acquire double load: LA32 has no 64-bit atomic load, so it calls
; __atomic_load_8 (ordering argument 2 = acquire) and moves the i64
; result into an FPR via the stack; LA64 uses ld.d + dbar 20.
define double @load_acquire_double(ptr %ptr) {
; LA32-LABEL: load_acquire_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    ori $a1, $zero, 2
; LA32-NEXT:    bl %plt(__atomic_load_8)
; LA32-NEXT:    st.w $a1, $sp, 4
; LA32-NEXT:    st.w $a0, $sp, 0
; LA32-NEXT:    fld.d $fa0, $sp, 0
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_acquire_double:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    movgr2fr.d $fa0, $a0
; LA64-NEXT:    dbar 20
; LA64-NEXT:    ret
  %val = load atomic double, ptr %ptr acquire, align 8
  ret double %val
}
134+
91135
define i8 @load_unordered_i8(ptr %ptr) {
92136
; LA32-LABEL: load_unordered_i8:
93137
; LA32: # %bb.0:
@@ -165,6 +209,47 @@ define ptr @load_unordered_ptr(ptr %ptr) {
165209
ret ptr %val
166210
}
167211

212+
; Unordered float load: no fence needed, lowers to a plain load + FPR move.
define float @load_unordered_float(ptr %ptr) {
; LA32-LABEL: load_unordered_float:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $a0, 0
; LA32-NEXT:    movgr2fr.w $fa0, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_unordered_float:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $a0, 0
; LA64-NEXT:    movgr2fr.w $fa0, $a0
; LA64-NEXT:    ret
  %val = load atomic float, ptr %ptr unordered, align 8
  ret float %val
}
227+
228+
; Unordered double load: LA32 calls __atomic_load_8 (ordering argument
; 0 = relaxed/unordered); LA64 uses a plain ld.d with no fence.
define double @load_unordered_double(ptr %ptr) {
; LA32-LABEL: load_unordered_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    move $a1, $zero
; LA32-NEXT:    bl %plt(__atomic_load_8)
; LA32-NEXT:    st.w $a1, $sp, 4
; LA32-NEXT:    st.w $a0, $sp, 0
; LA32-NEXT:    fld.d $fa0, $sp, 0
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_unordered_double:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    movgr2fr.d $fa0, $a0
; LA64-NEXT:    ret
  %val = load atomic double, ptr %ptr unordered, align 8
  ret double %val
}
252+
168253
define i8 @load_monotonic_i8(ptr %ptr) {
169254
; LA32-LABEL: load_monotonic_i8:
170255
; LA32: # %bb.0:
@@ -242,6 +327,47 @@ define ptr @load_monotonic_ptr(ptr %ptr) {
242327
ret ptr %val
243328
}
244329

330+
; Monotonic float load: like unordered, no fence is required.
define float @load_monotonic_float(ptr %ptr) {
; LA32-LABEL: load_monotonic_float:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $a0, 0
; LA32-NEXT:    movgr2fr.w $fa0, $a0
; LA32-NEXT:    ret
;
; LA64-LABEL: load_monotonic_float:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $a0, 0
; LA64-NEXT:    movgr2fr.w $fa0, $a0
; LA64-NEXT:    ret
  %val = load atomic float, ptr %ptr monotonic, align 8
  ret float %val
}
345+
346+
; Monotonic double load: LA32 calls __atomic_load_8 (ordering argument
; 0 = relaxed/monotonic); LA64 uses a plain ld.d with no fence.
define double @load_monotonic_double(ptr %ptr) {
; LA32-LABEL: load_monotonic_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    move $a1, $zero
; LA32-NEXT:    bl %plt(__atomic_load_8)
; LA32-NEXT:    st.w $a1, $sp, 4
; LA32-NEXT:    st.w $a0, $sp, 0
; LA32-NEXT:    fld.d $fa0, $sp, 0
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_monotonic_double:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    movgr2fr.d $fa0, $a0
; LA64-NEXT:    ret
  %val = load atomic double, ptr %ptr monotonic, align 8
  ret double %val
}
370+
245371
define i8 @load_seq_cst_i8(ptr %ptr) {
246372
; LA32-LABEL: load_seq_cst_i8:
247373
; LA32: # %bb.0:
@@ -328,6 +454,50 @@ define ptr @load_seq_cst_ptr(ptr %ptr) {
328454
ret ptr %val
329455
}
330456

457+
; Seq_cst float load: load + FPR move followed by a full barrier
; (dbar hint 16) on both LA32 and LA64.
define float @load_seq_cst_float(ptr %ptr) {
; LA32-LABEL: load_seq_cst_float:
; LA32:       # %bb.0:
; LA32-NEXT:    ld.w $a0, $a0, 0
; LA32-NEXT:    movgr2fr.w $fa0, $a0
; LA32-NEXT:    dbar 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_seq_cst_float:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.w $a0, $a0, 0
; LA64-NEXT:    movgr2fr.w $fa0, $a0
; LA64-NEXT:    dbar 16
; LA64-NEXT:    ret
  %val = load atomic float, ptr %ptr seq_cst, align 8
  ret float %val
}
474+
475+
; Seq_cst double load: LA32 calls __atomic_load_8 (ordering argument
; 5 = seq_cst); LA64 uses ld.d + full barrier (dbar 16).
define double @load_seq_cst_double(ptr %ptr) {
; LA32-LABEL: load_seq_cst_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    ori $a1, $zero, 5
; LA32-NEXT:    bl %plt(__atomic_load_8)
; LA32-NEXT:    st.w $a1, $sp, 4
; LA32-NEXT:    st.w $a0, $sp, 0
; LA32-NEXT:    fld.d $fa0, $sp, 0
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: load_seq_cst_double:
; LA64:       # %bb.0:
; LA64-NEXT:    ld.d $a0, $a0, 0
; LA64-NEXT:    movgr2fr.d $fa0, $a0
; LA64-NEXT:    dbar 16
; LA64-NEXT:    ret
  %val = load atomic double, ptr %ptr seq_cst, align 8
  ret double %val
}
500+
331501
define void @store_release_i8(ptr %ptr, i8 signext %v) {
332502
; LA32-LABEL: store_release_i8:
333503
; LA32: # %bb.0:
@@ -411,6 +581,48 @@ define void @store_release_ptr(ptr %ptr, ptr %v) {
411581
ret void
412582
}
413583

584+
; Release float store: LA32 emits a release barrier (dbar 18) before the
; store; LA64 uses amswap_db.w with $zero as the old-value destination.
define void @store_release_float(ptr %ptr, float %v) {
; LA32-LABEL: store_release_float:
; LA32:       # %bb.0:
; LA32-NEXT:    movfr2gr.s $a1, $fa0
; LA32-NEXT:    dbar 18
; LA32-NEXT:    st.w $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_release_float:
; LA64:       # %bb.0:
; LA64-NEXT:    movfr2gr.s $a1, $fa0
; LA64-NEXT:    amswap_db.w $zero, $a1, $a0
; LA64-NEXT:    ret
  store atomic float %v, ptr %ptr release, align 8
  ret void
}
600+
601+
; Release double store: LA32 spills the double to the stack to split it
; into two GPRs, then calls __atomic_store_8 (ordering argument 3 =
; release); LA64 uses amswap_db.d.
define void @store_release_double(ptr %ptr, double %v) {
; LA32-LABEL: store_release_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    fst.d $fa0, $sp, 0
; LA32-NEXT:    ld.w $a1, $sp, 0
; LA32-NEXT:    ld.w $a2, $sp, 4
; LA32-NEXT:    ori $a3, $zero, 3
; LA32-NEXT:    bl %plt(__atomic_store_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_release_double:
; LA64:       # %bb.0:
; LA64-NEXT:    movfr2gr.d $a1, $fa0
; LA64-NEXT:    amswap_db.d $zero, $a1, $a0
; LA64-NEXT:    ret
  store atomic double %v, ptr %ptr release, align 8
  ret void
}
625+
414626
define void @store_unordered_i8(ptr %ptr, i8 signext %v) {
415627
; LA32-LABEL: store_unordered_i8:
416628
; LA32: # %bb.0:
@@ -488,6 +700,47 @@ define void @store_unordered_ptr(ptr %ptr, ptr %v) {
488700
ret void
489701
}
490702

703+
; Unordered float store: no fence, plain FPR-to-GPR move + st.w.
define void @store_unordered_float(ptr %ptr, float %v) {
; LA32-LABEL: store_unordered_float:
; LA32:       # %bb.0:
; LA32-NEXT:    movfr2gr.s $a1, $fa0
; LA32-NEXT:    st.w $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_unordered_float:
; LA64:       # %bb.0:
; LA64-NEXT:    movfr2gr.s $a1, $fa0
; LA64-NEXT:    st.w $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic float %v, ptr %ptr unordered, align 8
  ret void
}
718+
719+
; Unordered double store: LA32 calls __atomic_store_8 (ordering argument
; 0 = relaxed/unordered); LA64 uses a plain st.d.
define void @store_unordered_double(ptr %ptr, double %v) {
; LA32-LABEL: store_unordered_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    fst.d $fa0, $sp, 0
; LA32-NEXT:    ld.w $a1, $sp, 0
; LA32-NEXT:    ld.w $a2, $sp, 4
; LA32-NEXT:    move $a3, $zero
; LA32-NEXT:    bl %plt(__atomic_store_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_unordered_double:
; LA64:       # %bb.0:
; LA64-NEXT:    movfr2gr.d $a1, $fa0
; LA64-NEXT:    st.d $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic double %v, ptr %ptr unordered, align 8
  ret void
}
743+
491744
define void @store_monotonic_i8(ptr %ptr, i8 signext %v) {
492745
; LA32-LABEL: store_monotonic_i8:
493746
; LA32: # %bb.0:
@@ -565,6 +818,47 @@ define void @store_monotonic_ptr(ptr %ptr, ptr %v) {
565818
ret void
566819
}
567820

821+
; Monotonic float store: like unordered, no fence is required.
define void @store_monotonic_float(ptr %ptr, float %v) {
; LA32-LABEL: store_monotonic_float:
; LA32:       # %bb.0:
; LA32-NEXT:    movfr2gr.s $a1, $fa0
; LA32-NEXT:    st.w $a1, $a0, 0
; LA32-NEXT:    ret
;
; LA64-LABEL: store_monotonic_float:
; LA64:       # %bb.0:
; LA64-NEXT:    movfr2gr.s $a1, $fa0
; LA64-NEXT:    st.w $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic float %v, ptr %ptr monotonic, align 8
  ret void
}
836+
837+
; Monotonic double store: LA32 calls __atomic_store_8 (ordering argument
; 0 = relaxed/monotonic); LA64 uses a plain st.d.
define void @store_monotonic_double(ptr %ptr, double %v) {
; LA32-LABEL: store_monotonic_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    fst.d $fa0, $sp, 0
; LA32-NEXT:    ld.w $a1, $sp, 0
; LA32-NEXT:    ld.w $a2, $sp, 4
; LA32-NEXT:    move $a3, $zero
; LA32-NEXT:    bl %plt(__atomic_store_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_monotonic_double:
; LA64:       # %bb.0:
; LA64-NEXT:    movfr2gr.d $a1, $fa0
; LA64-NEXT:    st.d $a1, $a0, 0
; LA64-NEXT:    ret
  store atomic double %v, ptr %ptr monotonic, align 8
  ret void
}
861+
568862
define void @store_seq_cst_i8(ptr %ptr, i8 signext %v) {
569863
; LA32-LABEL: store_seq_cst_i8:
570864
; LA32: # %bb.0:
@@ -653,3 +947,46 @@ define void @store_seq_cst_ptr(ptr %ptr, ptr %v) {
653947
store atomic ptr %v, ptr %ptr seq_cst, align 8
654948
ret void
655949
}
950+
951+
; Seq_cst float store: LA32 brackets the store with full barriers
; (dbar 16); LA64 uses amswap_db.w, which carries the needed ordering.
define void @store_seq_cst_float(ptr %ptr, float %v) {
; LA32-LABEL: store_seq_cst_float:
; LA32:       # %bb.0:
; LA32-NEXT:    movfr2gr.s $a1, $fa0
; LA32-NEXT:    dbar 16
; LA32-NEXT:    st.w $a1, $a0, 0
; LA32-NEXT:    dbar 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_seq_cst_float:
; LA64:       # %bb.0:
; LA64-NEXT:    movfr2gr.s $a1, $fa0
; LA64-NEXT:    amswap_db.w $zero, $a1, $a0
; LA64-NEXT:    ret
  store atomic float %v, ptr %ptr seq_cst, align 8
  ret void
}
968+
969+
; Seq_cst double store: LA32 calls __atomic_store_8 (ordering argument
; 5 = seq_cst); LA64 uses amswap_db.d.
define void @store_seq_cst_double(ptr %ptr, double %v) {
; LA32-LABEL: store_seq_cst_double:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    fst.d $fa0, $sp, 0
; LA32-NEXT:    ld.w $a1, $sp, 0
; LA32-NEXT:    ld.w $a2, $sp, 4
; LA32-NEXT:    ori $a3, $zero, 5
; LA32-NEXT:    bl %plt(__atomic_store_8)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: store_seq_cst_double:
; LA64:       # %bb.0:
; LA64-NEXT:    movfr2gr.d $a1, $fa0
; LA64-NEXT:    amswap_db.d $zero, $a1, $a0
; LA64-NEXT:    ret
  store atomic double %v, ptr %ptr seq_cst, align 8
  ret void
}

0 commit comments

Comments
 (0)