@@ -272,11 +272,17 @@ static int __restore_fpregs_from_user(void __user *buf, u64 xrestore,
 	}
 }
 
-static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only)
+/*
+ * Attempt to restore the FPU registers directly from user memory.
+ * Pagefaults are handled and any errors returned are fatal.
+ */
+static int restore_fpregs_from_user(void __user *buf, u64 xrestore,
+				    bool fx_only, unsigned int size)
 {
 	struct fpu *fpu = &current->thread.fpu;
 	int ret;
 
+retry:
 	fpregs_lock();
 	pagefault_disable();
 	ret = __restore_fpregs_from_user(buf, xrestore, fx_only);
@@ -293,14 +299,18 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
 	 * invalidate the FPU register state otherwise the task
 	 * might preempt current and return to user space with
 	 * corrupted FPU registers.
-	 *
-	 * In case current owns the FPU registers then no further
-	 * action is required. The fixup in the slow path will
-	 * handle it correctly.
 	 */
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		__cpu_invalidate_fpregs_state();
 	fpregs_unlock();
+
+	/* Try to handle #PF, but anything else is fatal. */
+	if (ret != -EFAULT)
+		return -EINVAL;
+
+	ret = fault_in_pages_readable(buf, size);
+	if (!ret)
+		goto retry;
 	return ret;
 }
 
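The fast path thus becomes: attempt the restore with pagefaults disabled, and if that fails with -EFAULT, fault the user buffer in with fault_in_pages_readable() and retry. Faulting in is nothing more than reading one byte per page with the pagefault handler enabled. Below is a minimal userspace sketch of the same idea, assuming nothing beyond POSIX mmap()/mincore(); fault_in_readable() here is an illustrative stand-in, not the kernel helper.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Hypothetical userspace analogue of fault_in_pages_readable():
 * touch one byte per page so a later access cannot fault. */
static void fault_in_readable(const char *start, size_t size)
{
	long page = sysconf(_SC_PAGESIZE);
	const volatile char *p;

	for (p = start; p < start + size; p += page)
		(void)*p;	/* demand-fault this page in */
	if (size)
		(void)((const volatile char *)start)[size - 1];
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 4 * page;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	unsigned char vec[4];

	mincore(buf, len, vec);
	printf("resident before: %d%d%d%d\n",
	       vec[0] & 1, vec[1] & 1, vec[2] & 1, vec[3] & 1);

	fault_in_readable(buf, len);

	mincore(buf, len, vec);
	printf("resident after:  %d%d%d%d\n",
	       vec[0] & 1, vec[1] & 1, vec[2] & 1, vec[3] & 1);
	return 0;
}

mincore() shows the anonymous pages becoming resident only once each one has been touched, which is exactly the guarantee the retry path relies on before it re-enters the pagefault-disabled region.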
@@ -311,9 +321,7 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
 	 *
 	 * It would be optimal to handle this with a single XRSTORS, but
 	 * this does not work because the rest of the FPU registers have
-	 * been restored from a user buffer directly. The single XRSTORS
-	 * happens below, when the user buffer has been copied to the
-	 * kernel one.
+	 * been restored from a user buffer directly.
 	 */
 	if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor())
 		os_xrstor(&fpu->state.xsave, xfeatures_mask_supervisor());
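Supervisor xstate components (the xfeatures_mask_supervisor() bits) are enabled through IA32_XSS and never appear in the user signal frame; they are saved and restored kernel-internally with os_xsave()/os_xrstor(). What user space can observe is XCR0, which enumerates exactly the user features a signal frame can carry. A small sketch for dumping it, not part of the patch; it assumes an x86-64 CPU with OSXSAVE enabled, otherwise xgetbv raises #UD.

#include <stdint.h>
#include <stdio.h>

/* Read XCR0 (extended control register 0) via xgetbv. */
static uint64_t xgetbv0(void)
{
	uint32_t eax, edx;

	__asm__ volatile("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
	return ((uint64_t)edx << 32) | eax;
}

int main(void)
{
	uint64_t xcr0 = xgetbv0();

	printf("XCR0 = %#llx\n", (unsigned long long)xcr0);
	printf("  x87    : %d\n", !!(xcr0 & 0x01));
	printf("  SSE    : %d\n", !!(xcr0 & 0x02));
	printf("  AVX    : %d\n", !!(xcr0 & 0x04));
	printf("  AVX-512: %d\n", !!(xcr0 & 0xe0));	/* opmask/ZMM bits */
	return 0;
}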
@@ -326,14 +334,13 @@ static int restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only
 static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 			     bool ia32_fxstate)
 {
-	struct user_i387_ia32_struct *envp = NULL;
 	int state_size = fpu_kernel_xstate_size;
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
 	struct user_i387_ia32_struct env;
 	u64 user_xfeatures = 0;
 	bool fx_only = false;
-	int ret = 0;
+	int ret;
 
 	if (use_xsave()) {
 		struct _fpx_sw_bytes fx_sw_user;
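state_size starts out as fpu_kernel_xstate_size, the XSAVE buffer size the kernel computed from CPUID enumeration at boot (it is adjusted further down for the fx_only case). The same enumeration is visible from user space via CPUID leaf 0xD. A purely illustrative sketch using GCC/clang's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 0xD, sub-leaf 0: EBX = bytes needed for an XSAVE
	 * area covering the features currently enabled in XCR0,
	 * ECX = maximum size if all supported features were enabled. */
	if (!__get_cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xD not supported");
		return 1;
	}
	printf("XSAVE area: %u bytes enabled, %u bytes max\n", ebx, ecx);
	return 0;
}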
@@ -354,20 +361,19 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		 * faults. If it does, fall back to the slow path below, going
 		 * through the kernel buffer with the enabled pagefault handler.
 		 */
-		ret = restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only);
-		if (likely(!ret))
-			return 0;
-	} else {
-		/*
-		 * For 32-bit frames with fxstate, copy the fxstate so it can
-		 * be reconstructed later.
-		 */
-		ret = __copy_from_user(&env, buf, sizeof(env));
-		if (ret)
-			return ret;
-		envp = &env;
+		return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only,
+						state_size);
 	}
 
+	/*
+	 * Copy the legacy state because the FP portion of the FX frame has
+	 * to be ignored for histerical raisins. The legacy state is folded
+	 * in once the larger state has been copied.
+	 */
+	ret = __copy_from_user(&env, buf, sizeof(env));
+	if (ret)
+		return ret;
+
 	/*
 	 * By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is
 	 * not modified on context switch and that the xstate is considered
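The "histerical raisins": 32-bit signal frames lead with a legacy FSAVE-format area, and the FP portion of the following FX frame is ignored in its favor, so the legacy area has to be captured into env before the larger copy and folded back in afterwards (that is what sanitize_restored_user_xstate() does with &env below). For reference, a standalone mirror of that legacy layout; the struct name is made up here, while the field layout follows the kernel's user_i387_ia32_struct:

#include <assert.h>
#include <stdint.h>

/* Illustrative mirror of the kernel's user_i387_ia32_struct:
 * the classic FSAVE image that leads a 32-bit signal frame. */
struct i387_fsave_image {
	uint32_t cwd, swd, twd;		/* control, status, tag words */
	uint32_t fip, fcs, foo, fos;	/* last instruction/operand ptrs */
	uint32_t st_space[20];		/* 8 x 10-byte ST(i) registers */
};

/* The FSAVE format is architecturally fixed at 108 bytes; the
 * FXSAVE area that follows in the frame is 512 bytes. */
static_assert(sizeof(struct i387_fsave_image) == 108, "FSAVE is 108 bytes");

int main(void) { return 0; }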
@@ -382,8 +388,7 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	 * supervisor state is preserved. Save the full state for
 	 * simplicity. There is no point in optimizing this by only
 	 * saving the supervisor states and then shuffle them to
-	 * the right place in memory. This is the slow path and the
-	 * above XRSTOR failed or ia32_fxstate is true. Shrug.
+	 * the right place in memory. It's ia32 mode. Shrug.
 	 */
 	if (xfeatures_mask_supervisor())
 		os_xsave(&fpu->state.xsave);
@@ -399,7 +404,7 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 	if (ret)
 		return ret;
 
-	sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures);
+	sanitize_restored_user_xstate(&fpu->state, &env, user_xfeatures);
 
 	fpregs_lock();
 	if (unlikely(init_bv))
@@ -412,12 +417,12 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		ret = os_xrstor_safe(&fpu->state.xsave,
 				     user_xfeatures | xfeatures_mask_supervisor());
 
-	} else if (use_fxsr()) {
+	} else {
 		ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
 		if (ret)
 			return -EFAULT;
 
-		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures);
+		sanitize_restored_user_xstate(&fpu->state, &env, user_xfeatures);
 
 		fpregs_lock();
 		if (use_xsave()) {
@@ -428,14 +433,8 @@ static int __fpu_restore_sig(void __user *buf, void __user *buf_fx,
 		}
 
 		ret = fxrstor_safe(&fpu->state.fxsave);
-	} else {
-		ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
-		if (ret)
-			return ret;
-
-		fpregs_lock();
-		ret = frstor_safe(&fpu->state.fsave);
 	}
+
 	if (!ret)
 		fpregs_mark_activate();
 	else
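For completeness, the buffer being restored here is what user space sees as the FPU area of its signal frame. On x86-64 Linux/glibc it is reachable from an SA_SIGINFO handler through uc_mcontext.fpregs, which points at the FXSAVE-format region of the frame; sigreturn() then feeds that region back into the code above. A tiny demo (printf in a handler is not async-signal-safe, but harmless for a synchronous raise()):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *si, void *ctx)
{
	ucontext_t *uc = ctx;

	(void)si;
	/* fpregs points into the FXSAVE-format area of this very
	 * signal frame on the user stack. */
	printf("sig %d: frame mxcsr=%#x fcw=%#x\n", sig,
	       uc->uc_mcontext.fpregs->mxcsr,
	       uc->uc_mcontext.fpregs->cwd);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	return 0;
}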