Skip to content

Commit fcb3635

Browse files
KAGA-KOKO (Thomas Gleixner) authored; suryasaimadhu (Borislav Petkov)
committed
x86/fpu/signal: Handle #PF in the direct restore path
If *RSTOR raises an exception, then the slow path is taken. That's wrong because if the reason was not #PF then going through the slow path is a waste of time because that will end up with the same conclusion that the data is invalid. Now that the wrapper around *RSTOR returns a negative error code, which is the negated trap number, it's possible to differentiate. If *RSTOR raised #PF then handle it directly in the fast path, and if it was some other exception, e.g. #GP, then give up and do not try the fast path. This removes the legacy frame FRSTOR code from the slow path because FRSTOR is not an ia32_fxstate frame and is therefore handled in the fast path. Signed-off-by: Thomas Gleixner <[email protected]> Signed-off-by: Borislav Petkov <[email protected]> Reviewed-by: Borislav Petkov <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent aee8c67 commit fcb3635

File tree

1 file changed

+33
-34
lines changed

1 file changed

+33
-34
lines changed

arch/x86/kernel/fpu/signal.c

Lines changed: 33 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -272,11 +272,17 @@ static int __restore_fpregs_from_user(void __user *buf, u64 xrestore,
272272
}
273273
}
274274

275-
static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
275+
/*
276+
* Attempt to restore the FPU registers directly from user memory.
277+
* Pagefaults are handled and any errors returned are fatal.
278+
*/
279+
static int restore_fpregs_from_user(void __user *buf, u64 xrestore,
280+
bool fx_only, unsigned int size)
276281
{
277282
struct fpu *fpu = &current->thread.fpu;
278283
int ret;
279284

285+
retry:
280286
fpregs_lock();
281287
pagefault_disable();
282288
ret = __restore_fpregs_from_user(buf, xrestore, fx_only);
@@ -293,14 +299,18 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
293299
* invalidate the FPU register state otherwise the task
294300
* might preempt current and return to user space with
295301
* corrupted FPU registers.
296-
*
297-
* In case current owns the FPU registers then no further
298-
* action is required. The fixup in the slow path will
299-
* handle it correctly.
300302
*/
301303
if (test_thread_flag(TIF_NEED_FPU_LOAD))
302304
__cpu_invalidate_fpregs_state();
303305
fpregs_unlock();
306+
307+
/* Try to handle #PF, but anything else is fatal. */
308+
if (ret != -EFAULT)
309+
return -EINVAL;
310+
311+
ret = fault_in_pages_readable(buf, size);
312+
if (!ret)
313+
goto retry;
304314
return ret;
305315
}
306316

@@ -311,9 +321,7 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
311321
*
312322
* It would be optimal to handle this with a single XRSTORS, but
313323
* this does not work because the rest of the FPU registers have
314-
* been restored from a user buffer directly. The single XRSTORS
315-
* happens below, when the user buffer has been copied to the
316-
* kernel one.
324+
* been restored from a user buffer directly.
317325
*/
318326
if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor())
319327
os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
@@ -326,14 +334,13 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
326334
static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
327335
bool ia32_fxstate)
328336
{
329-
struct user_i387_ia32_struct *envp = NULL;
330337
int state_size = fpu_kernel_xstate_size;
331338
struct task_struct *tsk = current;
332339
struct fpu *fpu = &tsk->thread.fpu;
333340
struct user_i387_ia32_struct env;
334341
u64 user_xfeatures = 0;
335342
bool fx_only = false;
336-
int ret = 0;
343+
int ret;
337344

338345
if (use_xsave()) {
339346
struct _fpx_sw_bytes fx_sw_user;
@@ -354,20 +361,19 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
354361
* faults. If it does, fall back to the slow path below, going
355362
* through the kernel buffer with the enabled pagefault handler.
356363
*/
357-
ret = restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only);
358-
if (likely(!ret))
359-
return 0;
360-
} else {
361-
/*
362-
* For 32-bit frames with fxstate, copy the fxstate so it can
363-
* be reconstructed later.
364-
*/
365-
ret = __copy_from_user(&env, buf, sizeof(env));
366-
if (ret)
367-
return ret;
368-
envp = &env;
364+
return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
365+
state_size);
369366
}
370367

368+
/*
369+
* Copy the legacy state because the FP portion of the FX frame has
370+
* to be ignored for histerical raisins. The legacy state is folded
371+
* in once the larger state has been copied.
372+
*/
373+
ret = __copy_from_user(&env, buf, sizeof(env));
374+
if (ret)
375+
return ret;
376+
371377
/*
372378
* By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
373379
* not modified on context switch and that the xstate is considered
@@ -382,8 +388,7 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
382388
* supervisor state is preserved. Save the full state for
383389
* simplicity. There is no point in optimizing this by only
384390
* saving the supervisor states and then shuffle them to
385-
* the right place in memory. This is the slow path and the
386-
* above XRSTOR failed or ia32_fxstate is true. Shrug.
391+
* the right place in memory. It's ia32 mode. Shrug.
387392
*/
388393
if (xfeatures_mask_supervisor())
389394
os_xsave(&fpu->state.xsave);
@@ -399,7 +404,7 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
399404
if (ret)
400405
return ret;
401406

402-
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures);
407+
sanitize_restored_user_xstate(&fpu->state, &env, user_xfeatures);
403408

404409
fpregs_lock();
405410
if (unlikely(init_bv))
@@ -412,12 +417,12 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
412417
ret = os_xrstor_safe(&fpu->state.xsave,
413418
user_xfeatures | xfeatures_mask_supervisor());
414419

415-
} else if (use_fxsr()) {
420+
} else {
416421
ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
417422
if (ret)
418423
return -EFAULT;
419424

420-
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures);
425+
sanitize_restored_user_xstate(&fpu->state, &env, user_xfeatures);
421426

422427
fpregs_lock();
423428
if (use_xsave()) {
@@ -428,14 +433,8 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
428433
}
429434

430435
ret = fxrstor_safe(&fpu->state.fxsave);
431-
} else {
432-
ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
433-
if (ret)
434-
return ret;
435-
436-
fpregs_lock();
437-
ret = frstor_safe(&fpu->state.fsave);
438436
}
437+
439438
if (!ret)
440439
fpregs_mark_activate();
441440
else

0 commit comments

Comments
 (0)