@@ -639,9 +639,20 @@ bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(_copy_from_iter_full_nocache);
 
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+	size_t v = n + offset;
+	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+		return true;
+	WARN_ON(1);
+	return false;
+}
+
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
 	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 		void *kaddr = kmap_atomic(page);
 		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -657,6 +668,8 @@ EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
 	if (unlikely(i->type & ITER_PIPE)) {
 		WARN_ON(1);
 		return 0;
@@ -713,6 +726,10 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes)
 {
 	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+	if (unlikely(!page_copy_sane(page, offset, bytes))) {
+		kunmap_atomic(kaddr);
+		return 0;
+	}
 	if (unlikely(i->type & ITER_PIPE)) {
 		kunmap_atomic(kaddr);
 		WARN_ON(1);
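For reference, page_copy_sane() is a two-part bounds test: n <= v rejects size_t
wraparound in n + offset, and v <= (PAGE_SIZE << compound_order(page)) keeps the
access inside the page, accounting for compound pages spanning 2^order base pages.
Below is a minimal userspace sketch of that logic; the PAGE_SIZE value and the
copy_sane()/main() harness are illustrative stand-ins, not kernel code.

/*
 * Standalone sketch of the overflow-safe bounds check performed by
 * page_copy_sane() in the patch above. PAGE_SIZE and the compound
 * order are stand-in values here, not the kernel definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool copy_sane(size_t offset, size_t n, unsigned int order)
{
	size_t v = n + offset;

	/*
	 * n <= v rejects size_t wraparound: if n + offset overflowed,
	 * v ends up smaller than n. The second test keeps the access
	 * within the 2^order base pages of a compound page.
	 */
	return n <= v && v <= (PAGE_SIZE << order);
}

int main(void)
{
	/* in bounds: 100 bytes at offset 0 of a single 4 KiB page */
	printf("%d\n", copy_sane(0, 100, 0));		/* 1 */
	/* runs past the end of a single page */
	printf("%d\n", copy_sane(4000, 200, 0));	/* 0 */
	/* same range is fine inside an order-1 (8 KiB) compound page */
	printf("%d\n", copy_sane(4000, 200, 1));	/* 1 */
	/* offset + n wraps around SIZE_MAX: caught by n <= v */
	printf("%d\n", copy_sane((size_t)-1, 2, 0));	/* 0 */
	return 0;
}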