 
 #include "internal.h"
 
+struct follow_page_context {
+	struct dev_pagemap *pgmap;
+	unsigned int page_mask;
+};
+
 static struct page *no_page_table(struct vm_area_struct *vma,
 		unsigned int flags)
 {
@@ -71,10 +76,10 @@ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
-		unsigned long address, pmd_t *pmd, unsigned int flags)
+		unsigned long address, pmd_t *pmd, unsigned int flags,
+		struct dev_pagemap **pgmap)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct dev_pagemap *pgmap = NULL;
 	struct page *page;
 	spinlock_t *ptl;
 	pte_t *ptep, pte;
@@ -116,8 +121,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		 * Only return device mapping pages in the FOLL_GET case since
 		 * they are only valid while holding the pgmap reference.
 		 */
-		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
-		if (pgmap)
+		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
+		if (*pgmap)
 			page = pte_page(pte);
 		else
 			goto no_page;
@@ -152,15 +157,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		goto retry;
 	}
 
-	if (flags & FOLL_GET) {
+	if (flags & FOLL_GET)
 		get_page(page);
-
-		/* drop the pgmap reference now that we hold the page */
-		if (pgmap) {
-			put_dev_pagemap(pgmap);
-			pgmap = NULL;
-		}
-	}
 	if (flags & FOLL_TOUCH) {
 		if ((flags & FOLL_WRITE) &&
 		    !pte_dirty(pte) && !PageDirty(page))
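The hunks above convert follow_page_pte() to work against a pgmap reference cached by its caller: get_dev_pagemap() is handed the previously cached pgmap so the reference can be reused while successive pfns keep hitting the same device mapping, and the per-page put_dev_pagemap() goes away because the caller now drops the cached reference once, when the walk is finished. Below is a minimal userspace sketch of that reuse pattern; the cached_map/get_map names and the plain int refcount are illustrative stand-ins, not the kernel's dev_pagemap API.

/* Hypothetical model of "reuse the cached reference"; not kernel code. */
#include <stdio.h>

struct cached_map {
	unsigned long start_pfn;	/* first pfn covered by this map */
	unsigned long nr_pages;		/* number of pfns covered */
	int refcount;			/* stands in for the percpu ref */
};

/* Return a referenced map covering @pfn, reusing @cached while it still
 * covers @pfn; otherwise drop @cached and look up a new map. */
static struct cached_map *get_map(unsigned long pfn, struct cached_map *cached,
				  struct cached_map *table, int nr_maps)
{
	int i;

	if (cached) {
		if (pfn >= cached->start_pfn &&
		    pfn < cached->start_pfn + cached->nr_pages)
			return cached;	/* still covers pfn: no new reference */
		cached->refcount--;	/* pfn left the range: drop the old one */
	}
	for (i = 0; i < nr_maps; i++) {
		struct cached_map *m = &table[i];

		if (pfn >= m->start_pfn && pfn < m->start_pfn + m->nr_pages) {
			m->refcount++;	/* one reference handed to the caller */
			return m;
		}
	}
	return NULL;
}

int main(void)
{
	struct cached_map table[] = {
		{ .start_pfn = 0,   .nr_pages = 100 },
		{ .start_pfn = 100, .nr_pages = 100 },
	};
	struct cached_map *cached = NULL;
	unsigned long pfn;

	/* Walk 200 pfns; only two get/put pairs happen in total. */
	for (pfn = 0; pfn < 200; pfn++)
		cached = get_map(pfn, cached, table, 2);
	if (cached)
		cached->refcount--;	/* single put once the walk is done */

	printf("refcounts after walk: %d %d\n",
	       table[0].refcount, table[1].refcount);
	return 0;
}

Walking the 200 pfns costs two reference acquisitions instead of 200 get/put pairs, which is what threading the cache through the new follow_page_context buys the GUP path.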
@@ -210,7 +208,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 
 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 				    unsigned long address, pud_t *pudp,
-				    unsigned int flags, unsigned int *page_mask)
+				    unsigned int flags,
+				    struct follow_page_context *ctx)
 {
 	pmd_t *pmd, pmdval;
 	spinlock_t *ptl;
@@ -258,13 +257,13 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	}
 	if (pmd_devmap(pmdval)) {
 		ptl = pmd_lock(mm, pmd);
-		page = follow_devmap_pmd(vma, address, pmd, flags);
+		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
 		spin_unlock(ptl);
 		if (page)
 			return page;
 	}
 	if (likely(!pmd_trans_huge(pmdval)))
-		return follow_page_pte(vma, address, pmd, flags);
+		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 
 	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
 		return no_page_table(vma, flags);
@@ -284,7 +283,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	}
 	if (unlikely(!pmd_trans_huge(*pmd))) {
 		spin_unlock(ptl);
-		return follow_page_pte(vma, address, pmd, flags);
+		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
 	if (flags & FOLL_SPLIT) {
 		int ret;
@@ -307,18 +306,18 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		}
 
 		return ret ? ERR_PTR(ret) :
-			follow_page_pte(vma, address, pmd, flags);
+			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
 	page = follow_trans_huge_pmd(vma, address, pmd, flags);
 	spin_unlock(ptl);
-	*page_mask = HPAGE_PMD_NR - 1;
+	ctx->page_mask = HPAGE_PMD_NR - 1;
 	return page;
 }
 
-
 static struct page *follow_pud_mask(struct vm_area_struct *vma,
 				    unsigned long address, p4d_t *p4dp,
-				    unsigned int flags, unsigned int *page_mask)
+				    unsigned int flags,
+				    struct follow_page_context *ctx)
 {
 	pud_t *pud;
 	spinlock_t *ptl;
@@ -344,21 +343,21 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 	}
 	if (pud_devmap(*pud)) {
 		ptl = pud_lock(mm, pud);
-		page = follow_devmap_pud(vma, address, pud, flags);
+		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
 		spin_unlock(ptl);
 		if (page)
 			return page;
 	}
 	if (unlikely(pud_bad(*pud)))
 		return no_page_table(vma, flags);
 
-	return follow_pmd_mask(vma, address, pud, flags, page_mask);
+	return follow_pmd_mask(vma, address, pud, flags, ctx);
 }
 
-
 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 				    unsigned long address, pgd_t *pgdp,
-				    unsigned int flags, unsigned int *page_mask)
+				    unsigned int flags,
+				    struct follow_page_context *ctx)
 {
 	p4d_t *p4d;
 	struct page *page;
@@ -378,7 +377,7 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
-	return follow_pud_mask(vma, address, p4d, flags, page_mask);
+	return follow_pud_mask(vma, address, p4d, flags, ctx);
 }
 
 /**
@@ -396,13 +395,13 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
  */
 struct page *follow_page_mask(struct vm_area_struct *vma,
 			      unsigned long address, unsigned int flags,
-			      unsigned int *page_mask)
+			      struct follow_page_context *ctx)
 {
 	pgd_t *pgd;
 	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 
-	*page_mask = 0;
+	ctx->page_mask = 0;
 
 	/* make this handle hugepd */
 	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
@@ -431,7 +430,19 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 		return no_page_table(vma, flags);
 	}
 
-	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
+	return follow_p4d_mask(vma, address, pgd, flags, ctx);
+}
+
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+			 unsigned int foll_flags)
+{
+	struct follow_page_context ctx = { NULL };
+	struct page *page;
+
+	page = follow_page_mask(vma, address, foll_flags, &ctx);
+	if (ctx.pgmap)
+		put_dev_pagemap(ctx.pgmap);
+	return page;
 }
 
 static int get_gate_page(struct mm_struct *mm, unsigned long address,
@@ -659,9 +670,9 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned int gup_flags, struct page **pages,
 		struct vm_area_struct **vmas, int *nonblocking)
 {
-	long i = 0;
-	unsigned int page_mask;
+	long ret = 0, i = 0;
 	struct vm_area_struct *vma = NULL;
+	struct follow_page_context ctx = { NULL };
 
 	if (!nr_pages)
 		return 0;
@@ -691,12 +702,14 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 						pages ? &pages[i] : NULL);
 				if (ret)
 					return i ? : ret;
-				page_mask = 0;
+				ctx.page_mask = 0;
 				goto next_page;
 			}
 
-			if (!vma || check_vma_flags(vma, gup_flags))
-				return i ? : -EFAULT;
+			if (!vma || check_vma_flags(vma, gup_flags)) {
+				ret = -EFAULT;
+				goto out;
+			}
 			if (is_vm_hugetlb_page(vma)) {
 				i = follow_hugetlb_page(mm, vma, pages, vmas,
 						&start, &nr_pages, i,
@@ -709,23 +722,26 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		 * If we have a pending SIGKILL, don't keep faulting pages and
 		 * potentially allocating memory.
 		 */
-		if (unlikely(fatal_signal_pending(current)))
-			return i ? i : -ERESTARTSYS;
+		if (unlikely(fatal_signal_pending(current))) {
+			ret = -ERESTARTSYS;
+			goto out;
+		}
 		cond_resched();
-		page = follow_page_mask(vma, start, foll_flags, &page_mask);
+
+		page = follow_page_mask(vma, start, foll_flags, &ctx);
 		if (!page) {
-			int ret;
 			ret = faultin_page(tsk, vma, start, &foll_flags,
 					nonblocking);
 			switch (ret) {
 			case 0:
 				goto retry;
+			case -EBUSY:
+				ret = 0;
+				/* FALLTHRU */
 			case -EFAULT:
 			case -ENOMEM:
 			case -EHWPOISON:
-				return i ? i : ret;
-			case -EBUSY:
-				return i;
+				goto out;
 			case -ENOENT:
 				goto next_page;
 			}
@@ -737,27 +753,31 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			 */
 			goto next_page;
 		} else if (IS_ERR(page)) {
-			return i ? i : PTR_ERR(page);
+			ret = PTR_ERR(page);
+			goto out;
 		}
 		if (pages) {
 			pages[i] = page;
 			flush_anon_page(vma, page, start);
 			flush_dcache_page(page);
-			page_mask = 0;
+			ctx.page_mask = 0;
 		}
 next_page:
 		if (vmas) {
 			vmas[i] = vma;
-			page_mask = 0;
+			ctx.page_mask = 0;
 		}
-		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
 		if (page_increm > nr_pages)
 			page_increm = nr_pages;
 		i += page_increm;
 		start += page_increm * PAGE_SIZE;
 		nr_pages -= page_increm;
 	} while (nr_pages);
-	return i;
+out:
+	if (ctx.pgmap)
+		put_dev_pagemap(ctx.pgmap);
+	return i ? i : ret;
 }
 
 static bool vma_permits_fault(struct vm_area_struct *vma,
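The page_increm line retained at the bottom of __get_user_pages() uses ctx.page_mask to step over the tail of a huge page in one iteration: when start sits k small pages into a huge page made of 2^n small pages (page_mask = 2^n - 1), 1 + (~(start >> PAGE_SHIFT) & page_mask) works out to 2^n - k, the number of small pages left before the huge page boundary. A standalone check of that identity is below; the 4 KiB base page and 512-entry PMD huge page are assumptions made for the example, not values stated by this diff.

/* Standalone check of the page_increm arithmetic; constants assume
 * 4 KiB base pages and 2 MiB PMD huge pages. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define HPAGE_PMD_NR	512UL		/* 2 MiB / 4 KiB */

int main(void)
{
	unsigned long page_mask = HPAGE_PMD_NR - 1;	/* ctx.page_mask for a PMD huge page */
	unsigned long huge_base = 0x200000;		/* a 2 MiB aligned address */
	unsigned long k;

	for (k = 0; k < HPAGE_PMD_NR; k++) {
		unsigned long start = huge_base + k * PAGE_SIZE;
		unsigned long page_increm =
			1 + (~(start >> PAGE_SHIFT) & page_mask);

		/* k small pages in, 512 - k small pages remain to the boundary */
		assert(page_increm == HPAGE_PMD_NR - k);
	}
	printf("page_increm always covers the remainder of the huge page\n");
	return 0;
}

A single trip through the loop can therefore account for every remaining subpage of a huge page, which is why the hunks above clear ctx.page_mask whenever the pages or vmas arrays have to be filled one entry at a time.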