@@ -869,19 +869,10 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 16
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 7
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a0, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT: srli a2, a2, 32
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a2, 8
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: srliw a0, a0, 3
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 3
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 0(a0)
-; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a0
+; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, sp, 31
+; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a2
+; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 64
 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret
 ;
@@ -896,32 +887,24 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 16(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, s0
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lw a0, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 0(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 7
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a0, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: srli a2, a2, 32
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a2, a2, 8
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: srliw a0, a0, 3
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 3
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld a0, 0(a0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: add a0, a1, a0
+; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, s0, 15
+; LP64-LP64F-LP64D-WITHFP-NEXT: add a0, a1, a2
+; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ret
 %va = alloca ptr
 call void @llvm.va_start(ptr %va)
-%argp.cur = load i32, ptr %va, align 4
-%1 = add i32 %argp.cur, 7
-%2 = and i32 %1, -8
-%argp.cur.aligned = inttoptr i32 %1 to ptr
+%argp.cur = load ptr, ptr %va
+%ptrint = ptrtoint ptr %argp.cur to iXLen
+%1 = add iXLen %ptrint, 7
+%2 = and iXLen %1, -8
+%argp.cur.aligned = inttoptr iXLen %1 to ptr
 %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
-store ptr %argp.next, ptr %va, align 4
-%3 = inttoptr i32 %2 to ptr
+store ptr %argp.next, ptr %va
+%3 = inttoptr iXLen %2 to ptr
 %4 = load double, ptr %3, align 8
 call void @llvm.va_end(ptr %va)
 %5 = bitcast double %4 to i64
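For readers less familiar with the pattern being checked: the new IR loads the va_list slot as a pointer and rounds the address up to an 8-byte boundary with ptrtoint/add/and/inttoptr before loading the double. A minimal standalone sketch of that rounding idiom, assuming the iXLen placeholder expands to i64 (the LP64 configurations) and using illustrative names not taken from the test, is:

; Sketch only: aligns a va_list pointer to 8 bytes and reads a double.
; Assumes an LP64 target (iXLen == i64); function and value names are hypothetical.
define double @va_arg_f64_sketch(ptr %va) {
  %argp = load ptr, ptr %va                        ; current vararg pointer
  %addr = ptrtoint ptr %argp to i64
  %bump = add i64 %addr, 7                         ; round up: (addr + 7) & -8
  %mask = and i64 %bump, -8
  %aligned = inttoptr i64 %mask to ptr
  %next = getelementptr inbounds i8, ptr %aligned, i64 8
  store ptr %next, ptr %va                         ; advance past the 8-byte slot
  %val = load double, ptr %aligned, align 8        ; read from the aligned slot
  ret double %val
}

Note that the test body advances the stored pointer from the pre-masking value (%1) rather than the masked one (%2); the sketch uses the masked address throughout for simplicity.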