 * Patrick O'Rourke <[email protected]>
 * 11/07/2000
 */
+/*
+ * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ * pv_ops.
+ */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
@@ -45,6 +50,7 @@

#include "minstate.h"

+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
/*
 * execve() is special because in case of success, we need to
 * setup a null register window frame.
@@ -173,14 +179,15 @@ GLOBAL_ENTRY(sys_clone)
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 * With Ingo's new scheduler, interrupts are disabled when this routine gets
 * called. The code starting at .map relies on this. The rest of the code
 * doesn't care about the interrupt masking status.
 */
-GLOBAL_ENTRY(ia64_switch_to)
+GLOBAL_ENTRY(__paravirt_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
@@ -204,7 +211,7 @@ GLOBAL_ENTRY(ia64_switch_to)
	;;
.done:
	ld8 sp=[r21]			// load kernel stack pointer of new task
-	mov IA64_KR(CURRENT)=in0	// update "current" application register
+	MOV_TO_KR(CURRENT, in0, r8, r9)	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
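Note on the macro style introduced above: the privileged single-instruction forms on the `-` lines (for example `mov IA64_KR(CURRENT)=in0`) become upper-case macros whose extra operands (`r8`, `r9` here) are scratch registers the macro is allowed to clobber. A native build can ignore them and emit the original instruction, while a hypervisor-specific header can use them for a replacement sequence. A minimal sketch of a native definition, inferred from the removed line and assuming this parameter order (the real header is not part of this diff):

	// Sketch only: plausible native expansion of the call sites above.
	// clob0/clob1 are unused natively; a paravirtualized header could
	// expand the same macro to a hypercall that needs the scratch regs.
	#define MOV_TO_KR(kr, reg, clob0, clob1)	\
		mov IA64_KR(kr) = reg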
@@ -216,26 +223,25 @@ GLOBAL_ENTRY(ia64_switch_to)
	br.ret.sptk.many rp		// boogie on out in new context

.map:
-	rsm psr.ic			// interrupts (psr.i) are already disabled here
+	RSM_PSR_IC(r25)			// interrupts (psr.i) are already disabled here
	movl r25=PAGE_KERNEL
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
	mov r25=IA64_GRANULE_SHIFT<<2
	;;
-	mov cr.itir=r25
-	mov cr.ifa=in0			// VA of next task...
+	MOV_TO_ITIR(p0, r25, r8)
+	MOV_TO_IFA(in0, r8)		// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
-	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
+	MOV_TO_KR(CURRENT_STACK, r26, r8, r9)	// remember last page we mapped...
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
-	ssm psr.ic			// reenable the psr.ic bit
-	;;
-	srlz.d
+	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
	br.cond.sptk .done
-END(ia64_switch_to)
+END(__paravirt_switch_to)

+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
/*
 * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
 * means that we may get an interrupt with "sp" pointing to the new kernel stack while
@@ -375,7 +381,7 @@ END(save_switch_stack)
 * - b7 holds address to return to
 * - must not touch r8-r11
 */
-ENTRY(load_switch_stack)
+GLOBAL_ENTRY(load_switch_stack)
	.prologue
	.altrp b7
@@ -571,7 +577,7 @@ GLOBAL_ENTRY(ia64_trace_syscall)
.ret3:
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
(pUStk)	rsm psr.i			// disable interrupts
-	br.cond.sptk .work_pending_syscall_end
+	br.cond.sptk ia64_work_pending_syscall_end

strace_error:
	ld8 r3=[r2]			// load pt_regs.r8
@@ -636,8 +642,17 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
	adds r2=PT(R8)+16,sp		// r2 = &pt_regs.r8
	mov r10=r0			// clear error indication in r10
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
+#ifdef CONFIG_PARAVIRT
+	;;
+	br.cond.sptk.few ia64_leave_syscall
+	;;
+#endif /* CONFIG_PARAVIRT */
END(ia64_ret_from_syscall)
+#ifndef CONFIG_PARAVIRT
	// fall through
+#endif
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 * need to switch to bank 0 and doesn't restore the scratch registers.
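Note on the CONFIG_PARAVIRT branches added above: ia64_ret_from_syscall used to fall straight through into ia64_leave_syscall. The hunk keeps the fall-through only for !CONFIG_PARAVIRT and otherwise branches explicitly, because, as the later ia32 hunk's comment puts it, the target "may be #define'd" to a different symbol in a paravirtualized build, so the code that follows textually is no longer the code that should run. A hypothetical illustration of why fall-through breaks (the alias name below is invented for the example, not taken from this diff):

	/* Hypothetical paravirt header: if the symbol is remapped like this,
	 * the text that sits just below ia64_ret_from_syscall is no longer
	 * the routine being named, so an explicit branch is required.
	 */
	#ifdef CONFIG_PARAVIRT
	# define ia64_leave_syscall	hypervisor_leave_syscall	/* invented name */
	#endif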
@@ -682,7 +697,7 @@ END(ia64_ret_from_syscall)
 * ar.csd: cleared
 * ar.ssd: cleared
 */
-ENTRY(ia64_leave_syscall)
+GLOBAL_ENTRY(__paravirt_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -692,11 +707,11 @@ ENTRY(ia64_leave_syscall)
	 * extra work. We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0. After extra work processing has been completed, execution
-	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
+	 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
-	rsm psr.i			// disable interrupts
+	RSM_PSR_I(p0, r2, r18)		// disable interrupts
	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
@@ -706,11 +721,12 @@ ENTRY(ia64_leave_syscall)
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r2, r18)
	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
-.work_processed_syscall:
+	.global __paravirt_work_processed_syscall;
+__paravirt_work_processed_syscall:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	adds r2=PT(LOADRS)+16,r12
(pUStk)	mov.m r22=ar.itc		// fetch time at leave
@@ -744,7 +760,7 @@ ENTRY(ia64_leave_syscall)
(pNonSys) break 0			// bug check: we shouldn't be here if pNonSys is TRUE!
	;;
	invala				// M0|1 invalidate ALAT
-	rsm psr.i | psr.ic		// M2 turn off interrupts and interruption collection
+	RSM_PSR_I_IC(r28, r29, r30)	// M2 turn off interrupts and interruption collection
	cmp.eq p9,p0=r0,r0		// A set p9 to indicate that we should restore cr.ifs

	ld8 r29=[r2],16			// M0|1 load cr.ipsr
@@ -765,7 +781,7 @@ ENTRY(ia64_leave_syscall)
	;;
#endif
	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
-(pKStk)	mov r22=psr			// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r21)	// M2 read PSR now that interrupts are disabled
	nop 0
	;;
	ld8 r21=[r2],PT(AR_RNAT)-PT(B0)	// M0|1 load b0
@@ -798,7 +814,7 @@ ENTRY(ia64_leave_syscall)

	srlz.d				// M0 ensure interruption collection is off (for cover)
	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
-	cover				// B add current frame into dirty partition & set cr.ifs
+	COVER				// B add current frame into dirty partition & set cr.ifs
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	mov r19=ar.bsp			// M2 get new backing store pointer
@@ -823,8 +839,9 @@ ENTRY(ia64_leave_syscall)
	mov.m ar.ssd=r0			// M2 clear ar.ssd
	mov f11=f0			// F clear f11
	br.cond.sptk.many rbs_switch	// B
-END(ia64_leave_syscall)
+END(__paravirt_leave_syscall)

+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
#ifdef CONFIG_IA32_SUPPORT
GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
	PT_REGS_UNWIND_INFO(0)
@@ -835,10 +852,20 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
	.mem.offset 8,0
	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
+#ifdef CONFIG_PARAVIRT
+	;;
+	// don't fall through, ia64_leave_kernel may be #define'd
+	br.cond.sptk.few ia64_leave_kernel
+	;;
+#endif /* CONFIG_PARAVIRT */
END(ia64_ret_from_ia32_execve)
+#ifndef CONFIG_PARAVIRT
	// fall through
+#endif
#endif /* CONFIG_IA32_SUPPORT */
-GLOBAL_ENTRY(ia64_leave_kernel)
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
+
+GLOBAL_ENTRY(__paravirt_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
@@ -852,7 +879,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
-	rsm psr.i			// disable interrupts
+	RSM_PSR_I(p0, r17, r31)		// disable interrupts
	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
@@ -862,7 +889,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else
-(pUStk)	rsm psr.i
+	RSM_PSR_I(pUStk, r17, r31)
	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
@@ -910,7 +937,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
-	rsm psr.i | psr.ic		// initiate turning off of interrupt and interruption collection
+	RSM_PSR_I_IC(r23, r22, r25)	// initiate turning off of interrupt and interruption collection
	invala				// invalidate ALAT
	;;
	ld8.fill r22=[r2],24
@@ -942,20 +969,20 @@ GLOBAL_ENTRY(ia64_leave_kernel)
	mov ar.ccv=r15
	;;
	ldf.fill f11=[r2]
-	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
+	BSW_0(r2, r3, r15)	// switch back to bank 0 (no stop bit required beforehand...)
	;;
(pUStk)	mov r18=IA64_KR(CURRENT)	// M2 (12 cycle read latency)
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
	.pred.rel.mutex pUStk,pKStk
-(pKStk)	mov r22=psr			// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
(pUStk)	mov.m r22=ar.itc		// M fetch time at leave
	nop.i 0
	;;
#else
-(pKStk)	mov r22=psr			// M2 read PSR now that interrupts are disabled
+	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
	nop.i 0
	nop.i 0
	;;
@@ -1027,7 +1054,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
-	cover			// add current frame into dirty partition and set cr.ifs
+	COVER			// add current frame into dirty partition and set cr.ifs
	;;
	mov r19=ar.bsp		// get new backing store pointer
rbs_switch:
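Note on COVER here (and RFI in the next hunks): the in-file comment points out that cover can't be predicated, and both cover and rfi change privilege-visible CPU state that a hypervisor has to intercept or replace, so they become parameterless macros. The removed lines show what the native expansions must reduce to; a sketch, assuming the macros take no operands, as at every call site in this diff:

	// Sketch only: native expansions inferred from the '-' lines.
	// A paravirtualized header would map these to its own replacements
	// for cover/rfi (e.g. a hypercall-based return path).
	#define COVER	cover
	#define RFI	rfi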
@@ -1130,16 +1157,16 @@ skip_rbs_switch:
(pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
(pLvSys) mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
	;;
-	mov cr.ipsr=r29			// M2
+	MOV_TO_IPSR(p0, r29, r25)	// M2
	mov ar.pfs=r26		// I0
(pLvSys) mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise

-(p9)	mov cr.ifs=r30			// M2
+	MOV_TO_IFS(p9, r30, r25)	// M2
	mov b0=r21		// I0
(pLvSys) mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise

	mov ar.fpsr=r20		// M2
-	mov cr.iip=r28		// M2
+	MOV_TO_IIP(r28, r25)	// M2
	nop 0
	;;
(pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
@@ -1148,7 +1175,7 @@ skip_rbs_switch:

	mov ar.rsc=r27		// M2
	mov pr=r31,-1		// I0
-	rfi			// B
+	RFI			// B

/*
 * On entry:
@@ -1174,35 +1201,36 @@ skip_rbs_switch:
	;;
(pKStk)	st4 [r20]=r21
#endif
-	ssm psr.i		// enable interrupts
+	SSM_PSR_I(p0, p6, r2)	// enable interrupts
	br.call.spnt.many rp=schedule
.ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1 (re-check)
-	rsm psr.i		// disable interrupts
+	RSM_PSR_I(p0, r2, r20)	// disable interrupts
	;;
#ifdef CONFIG_PREEMPT
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
(pKStk)	st4 [r20]=r0		// preempt_count() <- 0
#endif
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel

.notify:
(pUStk)	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
+(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel

-.work_pending_syscall_end:
+	.global __paravirt_pending_syscall_end;
+__paravirt_pending_syscall_end:
	adds r2=PT(R8)+16,r12
	adds r3=PT(R10)+16,r12
	;;
	ld8 r8=[r2]
	ld8 r10=[r3]
-	br.cond.sptk.many .work_processed_syscall
-
-END(ia64_leave_kernel)
+	br.cond.sptk.many __paravirt_work_processed_syscall_target
+END(__paravirt_leave_kernel)

+#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
ENTRY(handle_syscall_error)
	/*
	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
@@ -1244,7 +1272,7 @@ END(ia64_invoke_schedule_tail)
 * We declare 8 input registers so the system call args get preserved,
 * in case we need to restart a system call.
 */
-ENTRY(notify_resume_user)
+GLOBAL_ENTRY(notify_resume_user)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0	// preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat
@@ -1306,7 +1334,7 @@ ENTRY(sys_rt_sigreturn)
	adds sp=16,sp
	;;
	ld8 r9=[sp]		// load new ar.unat
-	mov.sptk b7=r8,ia64_leave_kernel
+	mov.sptk b7=r8,ia64_native_leave_kernel
	;;
	mov ar.unat=r9
	br.many b7
@@ -1665,3 +1693,4 @@ sys_call_table:
	data8 sys_timerfd_gettime

	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
+#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
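Overall shape of the change: the fast-path entry points are renamed to __paravirt_switch_to, __paravirt_leave_syscall, __paravirt_leave_kernel and __paravirt_work_processed_syscall, the privileged instructions they contain are funneled through the CPU-op macros above, and everything that only makes sense in one copy of the file (sys_clone, the switch-stack helpers, the syscall table) is fenced with __IA64_ASM_PARAVIRTUALIZED_NATIVE. The sigreturn hunk's new reference to ia64_native_leave_kernel suggests that a native build maps the __paravirt_* names back to ia64_native_* symbols; a sketch of how such a mapping could look (the header and exact names are assumptions, not shown in this diff):

	/* Sketch only: assumed native name mapping when entry.S is assembled
	 * with __IA64_ASM_PARAVIRTUALIZED_NATIVE defined.
	 */
	#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
	# define __paravirt_switch_to			ia64_native_switch_to
	# define __paravirt_leave_syscall		ia64_native_leave_syscall
	# define __paravirt_leave_kernel		ia64_native_leave_kernel
	# define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
	#endif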