@@ -253,14 +253,29 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 #endif /* CONFIG_ARCH_HAS_COPY_MC */
 
-static size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
-				  size_t len, void *to, void *priv2)
+static __always_inline
+size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
+			   size_t len, void *to, void *priv2)
+{
+	return copy_mc_to_kernel(to + progress, iter_from, len);
+}
+
+static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
 {
-	struct iov_iter *iter = priv2;
+	if (unlikely(i->count < bytes))
+		bytes = i->count;
+	if (unlikely(!bytes))
+		return 0;
+	return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
+}
 
-	if (iov_iter_is_copy_mc(iter))
-		return copy_mc_to_kernel(to + progress, iter_from, len);
-	return memcpy_from_iter(iter_from, progress, len, to, priv2);
+static __always_inline
+size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(iov_iter_is_copy_mc(i)))
+		return __copy_from_iter_mc(addr, bytes, i);
+	return iterate_and_advance(i, bytes, addr,
+				   copy_from_user_iter, memcpy_from_iter);
 }
 
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
@@ -270,9 +285,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 
 	if (user_backed_iter(i))
 		might_fault();
-	return iterate_and_advance2(i, bytes, addr, i,
-				    copy_from_user_iter,
-				    memcpy_from_iter_mc);
+	return __copy_from_iter(addr, bytes, i);
 }
 EXPORT_SYMBOL(_copy_from_iter);
 
@@ -493,9 +506,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
 		}
 
 		p = kmap_atomic(page) + offset;
-		n = iterate_and_advance2(i, n, p, i,
-					 copy_from_user_iter,
-					 memcpy_from_iter_mc);
+		n = __copy_from_iter(p, n, i);
 		kunmap_atomic(p);
 		copied += n;
 		offset += n;
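
Note on the hunks above: the iov_iter_is_copy_mc() test moves out of the per-segment step function and into __copy_from_iter(), which decides once per call whether to take the copy_mc path (__copy_from_iter_mc(), walking the iterator via iterate_bvec() with the hardened memcpy_from_iter_mc step) or the ordinary iterate_and_advance() path with memcpy_from_iter. The short userspace program below is only a sketch of that "branch once, then copy" shape; it is not kernel code, and the names fake_iter, step_mc, step_plain and sketch_copy_from_iter are invented for the illustration.

/*
 * Illustrative userspace analogue of the dispatch introduced by the patch:
 * the "is this a machine-check-protected copy?" question is answered once
 * per call, not once per copied segment.  All names here are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fake_iter {
	const char *data;	/* single flat segment, for simplicity */
	size_t count;		/* bytes remaining in the iterator */
	bool copy_mc;		/* analogue of iov_iter_is_copy_mc(i) */
};

/* Stand-in for the hardened copy_mc_to_kernel() step (return convention
 * simplified: bytes copied rather than bytes left uncopied). */
static size_t step_mc(void *to, const void *from, size_t len)
{
	memcpy(to, from, len);
	return len;
}

/* Stand-in for the plain memcpy_from_iter() step. */
static size_t step_plain(void *to, const void *from, size_t len)
{
	memcpy(to, from, len);
	return len;
}

/* Mirrors the shape of the new __copy_from_iter(): clamp, branch once,
 * then run the chosen step over the data. */
static size_t sketch_copy_from_iter(void *addr, size_t bytes, struct fake_iter *i)
{
	size_t (*step)(void *, const void *, size_t);
	size_t done;

	if (bytes > i->count)
		bytes = i->count;
	if (!bytes)
		return 0;

	step = i->copy_mc ? step_mc : step_plain;
	done = step(addr, i->data, bytes);	/* the real code walks segments here */
	i->data += done;
	i->count -= done;
	return done;
}

int main(void)
{
	char dst[8] = "";
	struct fake_iter it = { .data = "hello", .count = 5, .copy_mc = false };

	printf("copied %zu bytes: %s\n",
	       sketch_copy_from_iter(dst, sizeof(dst) - 1, &it), dst);
	return 0;
}

In the patch itself the two arms of that branch are __copy_from_iter_mc(), which as its use of iterate_bvec() suggests handles only bvec-backed iterators, and the generic iterate_and_advance() path; the effect in both the sketch and the patch is that the common non-copy_mc case no longer pays for a per-segment check.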