@@ -520,6 +520,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_pipe_to_iter(addr, bytes, i);
 	if (user_backed_iter(i))
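
/*
 * [Editor's note, not part of the patch] The check added above leans on
 * iov_iter::data_source: false for destination iterators (data is copied
 * *into* the iterator) and true for source ones (data is copied *out*).
 * A "copy to" primitive therefore wants data_source == false. A minimal
 * caller sketch, assuming the ITER_DEST/ITER_SOURCE direction names from
 * this series:
 *
 *	struct kvec kv = { .iov_base = dst_buf, .iov_len = len };
 *	struct iov_iter to;
 *
 *	iov_iter_kvec(&to, ITER_DEST, &kv, 1, len);
 *	copied = _copy_to_iter(src_buf, len, &to);	// right direction: copies
 *
 *	iov_iter_kvec(&to, ITER_SOURCE, &kv, 1, len);
 *	copied = _copy_to_iter(src_buf, len, &to);	// wrong direction: warns once, returns 0
 */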
@@ -606,6 +608,8 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
  */
 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_mc_pipe_to_iter(addr, bytes, i);
 	if (user_backed_iter(i))
@@ -622,10 +626,9 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
+
 	if (user_backed_iter(i))
 		might_fault();
 	iterate_and_advance(i, bytes, base, len, off,
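
/*
 * [Editor's note, not part of the patch] The "copy from" primitives invert
 * the test: they consume the iterator as a source, so !i->data_source is
 * the bogus case. Caller sketch under the same assumptions as above:
 *
 *	struct kvec kv = { .iov_base = src_buf, .iov_len = len };
 *	struct iov_iter from;
 *
 *	iov_iter_kvec(&from, ITER_SOURCE, &kv, 1, len);
 *	copied = _copy_from_iter(dst_buf, len, &from);	// data_source set: copies
 */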
@@ -639,10 +642,9 @@ EXPORT_SYMBOL(_copy_from_iter);
 
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
+
 	iterate_and_advance(i, bytes, base, len, off,
 		__copy_from_user_inatomic_nocache(addr + off, base, len),
 		memcpy(addr + off, base, len)
@@ -671,10 +673,9 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
  */
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	if (unlikely(iov_iter_is_pipe(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
+
 	iterate_and_advance(i, bytes, base, len, off,
 		__copy_from_user_flushcache(addr + off, base, len),
 		memcpy_flushcache(addr + off, base, len)
@@ -714,6 +715,8 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 	size_t res = 0;
 	if (!page_copy_sane(page, offset, bytes))
 		return 0;
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_page_to_iter_pipe(page, offset, bytes, i);
 	page += offset / PAGE_SIZE; // first subpage
@@ -811,9 +814,8 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
 		kunmap_atomic(kaddr);
 		return 0;
 	}
-	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
+	if (WARN_ON_ONCE(!i->data_source)) {
 		kunmap_atomic(kaddr);
-		WARN_ON(1);
 		return 0;
 	}
 	iterate_and_advance(i, bytes, base, len, off,
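
/*
 * [Editor's note, not part of the patch] copy_page_from_iter_atomic() is
 * the one conversion that keeps its braces: the page was mapped with
 * kmap_atomic() a few lines up, so the error path still has to call
 * kunmap_atomic(kaddr) before returning 0.
 */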
@@ -1525,10 +1527,9 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 {
 	__wsum sum, next;
 	sum = *csum;
-	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
-		WARN_ON(1);
+	if (WARN_ON_ONCE(!i->data_source))
 		return 0;
-	}
+
 	iterate_and_advance(i, bytes, base, len, off, ({
 		next = csum_and_copy_from_user(base, addr + off, len);
 		sum = csum_block_add(sum, next, off);
@@ -1548,6 +1549,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
 	struct csum_state *csstate = _csstate;
 	__wsum sum, next;
 
+	if (WARN_ON_ONCE(i->data_source))
+		return 0;
 	if (unlikely(iov_iter_is_discard(i))) {
 		// can't use csum_memcpy() for that one - data is not copied
 		csstate->csum = csum_block_add(csstate->csum,
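
/*
 * [Editor's note, not part of the patch] The reason a bare WARN_ON_ONCE()
 * can replace each "if (...) { WARN_ON(1); return 0; }" block: it
 * evaluates its argument, warns at most once per call site, and yields
 * the argument's truth value, so it composes directly into an if()
 * condition. Roughly (a simplified sketch, not the real kernel macro,
 * which also places the once-flag in a dedicated section):
 *
 *	#define WARN_ON_ONCE_SKETCH(cond) ({		\
 *		static bool __warned;			\
 *		int __ret = !!(cond);			\
 *		if (unlikely(__ret && !__warned)) {	\
 *			__warned = true;		\
 *			WARN_ON(1);			\
 *		}					\
 *		unlikely(__ret);			\
 *	})
 */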