/*
 * High level FPU state handling functions:
 */
- extern void fpu__activate_curr(struct fpu *fpu);
- extern void fpu__activate_fpstate_read(struct fpu *fpu);
- extern void fpu__activate_fpstate_write(struct fpu *fpu);
- extern void fpu__current_fpstate_write_begin(void);
- extern void fpu__current_fpstate_write_end(void);
+ extern void fpu__initialize(struct fpu *fpu);
+ extern void fpu__prepare_read(struct fpu *fpu);
+ extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
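
For illustration, a rough sketch of how a caller uses the renamed helpers; the function and names below are hypothetical and not part of this patch (the real call sites are updated elsewhere in the series). A reader of a task's saved FPU state now calls fpu__prepare_read(), the old fpu__activate_fpstate_read(), before touching fpu->state:

static void example_read_fxsave(struct task_struct *tsk, struct fxregs_state *dst)
{
        struct fpu *fpu = &tsk->thread.fpu;

        /* Make sure fpu->state holds the task's current FPU contents: */
        fpu__prepare_read(fpu);
        memcpy(dst, &fpu->state.fxsave, sizeof(*dst));
}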
@@ -120,20 +118,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
        err; \
})

- #define check_insn(insn, output, input...) \
- ({ \
-         int err; \
+ #define kernel_insn(insn, output, input...) \
        asm volatile("1:" #insn "\n\t" \
                "2:\n" \
-                 ".section .fixup,\"ax\"\n" \
-                 "3: movl $-1,%[err]\n" \
-                 " jmp 2b\n" \
-                 ".previous\n" \
-                 _ASM_EXTABLE(1b, 3b) \
-                 : [err] "=r" (err), output \
-                 : "0"(0), input); \
-         err; \
- })
+                 _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
+                 : output : input)

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
@@ -153,20 +142,16 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
-         int err;
-
        if (IS_ENABLED(CONFIG_X86_32)) {
-                 err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+                 kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        } else {
                if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
-                         err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+                         kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
                } else {
                        /* See comment in copy_fxregs_to_kernel() below. */
-                         err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
+                         kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
                }
        }
-         /* Copying from a kernel buffer to FPU registers should never fail: */
-         WARN_ON_FPU(err);
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
@@ -183,9 +168,7 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
-         int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-         WARN_ON_FPU(err);
+         kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
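
The net effect of the kernel_insn() conversion above: restoring FPU state from a kernel buffer no longer hands an error code back for the caller to WARN on. A faulting FXRSTOR/FRSTOR is instead routed through the exception table to ex_handler_fprestore(), which is defined outside this header. The handler below is only a sketch of that pattern, modeled on the other ex_handler_*() routines in arch/x86/mm/extable.c; its name, body and the ex_fixup_addr() helper are assumptions here, not code from this hunk:

static bool sketch_fprestore_handler(const struct exception_table_entry *fixup,
                                     struct pt_regs *regs, int trapnr)
{
        /* Resume at the fixup address, i.e. just past the failed *RSTOR: */
        regs->ip = ex_fixup_addr(fixup);

        WARN_ONCE(1, "Bad FPU state detected, reinitializing FPU registers\n");

        /* Drop the corrupted state and load the known-good init state: */
        copy_kernel_to_fpregs(&init_fpstate);
        return true;
}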
@@ -281,18 +264,13 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
- #define XSTATE_XRESTORE(st, lmask, hmask, err) \
+ #define XSTATE_XRESTORE(st, lmask, hmask) \
        asm volatile(ALTERNATIVE(XRSTOR, \
                        XRSTORS, X86_FEATURE_XSAVES) \
                "\n" \
-                 "xor %[err], %[err]\n" \
                "3:\n" \
-                 ".pushsection .fixup,\"ax\"\n" \
-                 "4: movl $-2, %[err]\n" \
-                 "jmp 3b\n" \
-                 ".popsection\n" \
-                 _ASM_EXTABLE(661b, 4b) \
-                 : [err] "=r" (err) \
+                 _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
+                 : \
                : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
                : "memory")

@@ -336,7 +314,10 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

-         /* We should never fault when copying from a kernel buffer: */
+         /*
+          * We should never fault when copying from a kernel buffer, and the FPU
+          * state we set at boot time should be valid.
+          */
        WARN_ON_FPU(err);
}

@@ -350,7 +331,7 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
        u32 hmask = mask >> 32;
        int err;

-         WARN_ON(!alternatives_patched);
+         WARN_ON_FPU(!alternatives_patched);

        XSTATE_XSAVE(xstate, lmask, hmask, err);

@@ -365,12 +346,8 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;
-         int err;
-
-         XSTATE_XRESTORE(xstate, lmask, hmask, err);

-         /* We should never fault when copying from a kernel buffer: */
-         WARN_ON_FPU(err);
+         XSTATE_XRESTORE(xstate, lmask, hmask);
}

/*
@@ -526,37 +503,16 @@ static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
-         WARN_ON_FPU(!fpu->fpregs_active);
-
-         fpu->fpregs_active = 0;
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
        trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
-         WARN_ON_FPU(fpu->fpregs_active);
-
-         fpu->fpregs_active = 1;
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
        trace_x86_fpu_regs_activated(fpu);
}

- /*
-  * The question "does this thread have fpu access?"
-  * is slightly racy, since preemption could come in
-  * and revoke it immediately after the test.
-  *
-  * However, even in that very unlikely scenario,
-  * we can just assume we have FPU access - typically
-  * to save the FP state - we'll just take a #NM
-  * fault and get the FPU access back.
-  */
- static inline int fpregs_active(void)
- {
-         return current->thread.fpu.fpregs_active;
- }
-
/*
 * FPU state switching for scheduling.
 *
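
With the ->fpregs_active flag and fpregs_active() gone, the question "are this task's registers currently loaded on this CPU?" is answered solely from the per-CPU owner pointer and the last-CPU check referenced in the hunk header above (fpregs_state_valid()). A minimal sketch of that test, assuming only the fields already used elsewhere in this file; the function name is hypothetical:

static inline bool sketch_fpregs_loaded(struct fpu *fpu, unsigned int cpu)
{
        /* The registers belong to @fpu and have not been clobbered since: */
        return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}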
@@ -571,14 +527,13 @@ static inline int fpregs_active(void)
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
-         if (old_fpu->fpregs_active) {
+         if (old_fpu->initialized) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
                        old_fpu->last_cpu = cpu;

                /* But leave fpu_fpregs_owner_ctx! */
-                 old_fpu->fpregs_active = 0;
                trace_x86_fpu_regs_deactivated(old_fpu);
        } else
                old_fpu->last_cpu = -1;
@@ -595,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)

static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
        bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-                        new_fpu->fpstate_active;
+                        new_fpu->initialized;

        if (preload) {
                if (!fpregs_state_valid(new_fpu, cpu))
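
The two halves above are used around a task switch: switch_fpu_prepare() runs on the outgoing task, switch_fpu_finish() on the incoming one. A condensed, hypothetical call site (the real one is x86's __switch_to(); the wrapper name below is illustrative only):

static void sketch_switch_to_fpu(struct task_struct *prev, struct task_struct *next, int cpu)
{
        /* Save @prev's live registers into prev->thread.fpu.state if it has any: */
        switch_fpu_prepare(&prev->thread.fpu, cpu);

        /* ... the rest of the low-level context switch happens here ... */

        /* Reload @next's saved state into the registers if @next has state: */
        switch_fpu_finish(&next->thread.fpu, cpu);
}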
@@ -617,8 +572,7 @@ static inline void user_fpu_begin(void)
        struct fpu *fpu = &current->thread.fpu;

        preempt_disable();
-         if (!fpregs_active())
-                 fpregs_activate(fpu);
+         fpregs_activate(fpu);
        preempt_enable();
}