114
114
#include <asm/pgtable-32.h>
115
115
#endif /* CONFIG_64BIT */
116
116
117
+ #include <linux/page_table_check.h>
118
+
117
119
#ifdef CONFIG_XIP_KERNEL
118
120
#define XIP_FIXUP (addr ) ({ \
119
121
uintptr_t __a = (uintptr_t)(addr); \
@@ -315,6 +317,11 @@ static inline int pte_exec(pte_t pte)
315
317
return pte_val (pte ) & _PAGE_EXEC ;
316
318
}
317
319
320
+ static inline int pte_user (pte_t pte )
321
+ {
322
+ return pte_val (pte ) & _PAGE_USER ;
323
+ }
324
+
318
325
static inline int pte_huge (pte_t pte )
319
326
{
320
327
return pte_present (pte ) && (pte_val (pte ) & _PAGE_LEAF );
@@ -446,7 +453,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
446
453
447
454
void flush_icache_pte (pte_t pte );
448
455
449
- static inline void set_pte_at (struct mm_struct * mm ,
456
+ static inline void __set_pte_at (struct mm_struct * mm ,
450
457
unsigned long addr , pte_t * ptep , pte_t pteval )
451
458
{
452
459
if (pte_present (pteval ) && pte_exec (pteval ))
@@ -455,10 +462,17 @@ static inline void set_pte_at(struct mm_struct *mm,
455
462
set_pte (ptep , pteval );
456
463
}
457
464
465
+ static inline void set_pte_at (struct mm_struct * mm ,
466
+ unsigned long addr , pte_t * ptep , pte_t pteval )
467
+ {
468
+ page_table_check_pte_set (mm , addr , ptep , pteval );
469
+ __set_pte_at (mm , addr , ptep , pteval );
470
+ }
471
+
458
472
/*
 * Clear a PTE by storing the zero (invalid) entry.  Uses the
 * low-level store: clearing is reported to page-table-check by the
 * dedicated clear hooks, not the set hook.
 */
static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	__set_pte_at(mm, addr, ptep, __pte(0));
}
463
477
464
478
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
@@ -479,7 +493,11 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
479
493
/*
 * Atomically read and zero a PTE, reporting the cleared entry to the
 * page-table-check machinery.  Returns the previous PTE value.
 */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, address, pte);

	return pte;
}
484
502
485
503
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -546,6 +564,13 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
546
564
return ((__pmd_to_phys (pmd ) & PMD_MASK ) >> PAGE_SHIFT );
547
565
}
548
566
567
+ #define __pud_to_phys (pud ) (pud_val(pud) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
568
+
569
+ static inline unsigned long pud_pfn (pud_t pud )
570
+ {
571
+ return ((__pud_to_phys (pud ) & PUD_MASK ) >> PAGE_SHIFT );
572
+ }
573
+
549
574
/* Rebuild a PMD with new protection bits via the PTE helpers. */
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
@@ -567,6 +592,11 @@ static inline int pmd_young(pmd_t pmd)
567
592
return pte_young (pmd_pte (pmd ));
568
593
}
569
594
595
+ static inline int pmd_user (pmd_t pmd )
596
+ {
597
+ return pte_user (pmd_pte (pmd ));
598
+ }
599
+
570
600
/* Return a copy of the PMD with the accessed (young) bit cleared. */
static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}
@@ -600,15 +630,39 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
600
630
/*
 * Store a PMD entry, notifying the page-table-check machinery first.
 * The store itself reuses the PTE path via a pointer cast.
 * (Dropped the non-standard `return <void expr>;` of the original.)
 */
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	__set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}
636
+
637
+ static inline int pud_user (pud_t pud )
638
+ {
639
+ return pte_user (pud_pte (pud ));
604
640
}
605
641
606
642
/*
 * Store a PUD entry, notifying the page-table-check machinery first.
 * The store itself reuses the PTE path via a pointer cast.
 * (Dropped the non-standard `return <void expr>;` of the original.)
 */
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	__set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}
648
+
649
#ifdef CONFIG_PAGE_TABLE_CHECK
/*
 * Predicates used by the generic page-table-check code: does this
 * entry map a page that userspace can access?  A PTE qualifies when
 * present and user-readable; PMD/PUD entries qualify only as user
 * leaf (huge) mappings.
 */
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif
665
+
612
666
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
613
667
static inline int pmd_trans_huge (pmd_t pmd )
614
668
{
@@ -634,7 +688,11 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
634
688
static inline pmd_t pmdp_huge_get_and_clear (struct mm_struct * mm ,
635
689
unsigned long address , pmd_t * pmdp )
636
690
{
637
- return pte_pmd (ptep_get_and_clear (mm , address , (pte_t * )pmdp ));
691
+ pmd_t pmd = __pmd (atomic_long_xchg ((atomic_long_t * )pmdp , 0 ));
692
+
693
+ page_table_check_pmd_clear (mm , address , pmd );
694
+
695
+ return pmd ;
638
696
}
639
697
640
698
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -648,6 +706,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
648
706
static inline pmd_t pmdp_establish (struct vm_area_struct * vma ,
649
707
unsigned long address , pmd_t * pmdp , pmd_t pmd )
650
708
{
709
+ page_table_check_pmd_set (vma -> vm_mm , address , pmdp , pmd );
651
710
return __pmd (atomic_long_xchg ((atomic_long_t * )pmdp , pmd_val (pmd )));
652
711
}
653
712
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
0 commit comments