@@ -637,14 +637,9 @@ EXPORT_SYMBOL_GPL(linear_hugepage_index);
  */
 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 {
-	struct hstate *hstate;
-
-	if (!is_vm_hugetlb_page(vma))
-		return PAGE_SIZE;
-
-	hstate = hstate_vma(vma);
-
-	return 1UL << huge_page_shift(hstate);
+	if (vma->vm_ops && vma->vm_ops->pagesize)
+		return vma->vm_ops->pagesize(vma);
+	return PAGE_SIZE;
 }
 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
 
@@ -3151,6 +3146,13 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 	return 0;
 }
 
+static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
+{
+	struct hstate *hstate = hstate_vma(vma);
+
+	return 1UL << huge_page_shift(hstate);
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3168,6 +3170,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
 	.open = hugetlb_vm_op_open,
 	.close = hugetlb_vm_op_close,
 	.split = hugetlb_vm_op_split,
+	.pagesize = hugetlb_vm_op_pagesize,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
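For context outside the kernel tree, here is a minimal, self-contained userspace sketch of the pattern this diff introduces: vma_kernel_pagesize() dispatches through an optional ->pagesize() hook in vm_operations_struct and falls back to PAGE_SIZE when no hook is registered. The struct definitions, the PAGE_SIZE value, and the 2 MiB huge-page shift below are stand-ins chosen for the demo, not the kernel's real definitions.

#include <stdio.h>

/* Stand-ins for the kernel definitions; the sizes are assumptions for the demo. */
#define PAGE_SIZE	4096UL
#define HUGE_PAGE_SHIFT	21	/* assume a 2 MiB huge page */

struct vm_area_struct;

struct vm_operations_struct {
	unsigned long (*pagesize)(struct vm_area_struct *vma);
};

struct vm_area_struct {
	const struct vm_operations_struct *vm_ops;
};

/* Plays the role of hugetlb_vm_op_pagesize(): report the mapping's huge page size. */
static unsigned long hugetlb_pagesize(struct vm_area_struct *vma)
{
	(void)vma;
	return 1UL << HUGE_PAGE_SHIFT;
}

static const struct vm_operations_struct hugetlb_vm_ops = {
	.pagesize = hugetlb_pagesize,
};

/*
 * Same shape as the rewritten vma_kernel_pagesize(): use the ->pagesize()
 * hook when the mapping registered one, otherwise fall back to PAGE_SIZE.
 */
static unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}

int main(void)
{
	struct vm_area_struct huge = { .vm_ops = &hugetlb_vm_ops };
	struct vm_area_struct anon = { .vm_ops = NULL };

	printf("hugetlb vma: %lu bytes\n", vma_kernel_pagesize(&huge));	/* 2097152 */
	printf("anon vma:    %lu bytes\n", vma_kernel_pagesize(&anon));	/* 4096 */
	return 0;
}

The upshot of the design is that vma_kernel_pagesize() no longer needs hugetlbfs-specific knowledge (the is_vm_hugetlb_page()/hstate_vma() calls removed in the first hunk); any mapping type can report a non-default page size by registering its own ->pagesize() hook.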