16
16
#ifndef __ASM_PGTABLE_H
17
17
#define __ASM_PGTABLE_H
18
18
19
+ #include <asm/bug.h>
19
20
#include <asm/proc-fns.h>
20
21
21
22
#include <asm/memory.h>
/*
 * Software-defined PTE bits (upper attribute bits are ignored by hardware
 * translation, so bits 55-58 are free for software use).
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* software dirty */
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * With hardware dirty-bit management, the writable bit is aliased onto the
 * hardware DBM bit so the CPU can clear PTE_RDONLY on write.
 */
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM */
#else
#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
#endif
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
32
37
33
38
/*
48
53
#define FIRST_USER_ADDRESS 0UL
49
54
50
55
#ifndef __ASSEMBLY__
56
+
57
+ #include <linux/mmdebug.h>
58
+
51
59
extern void __pte_error (const char * file , int line , unsigned long val );
52
60
extern void __pmd_error (const char * file , int line , unsigned long val );
53
61
extern void __pud_error (const char * file , int line , unsigned long val );
@@ -137,12 +145,20 @@ extern struct page *empty_zero_page;
137
145
* The following only work if pte_present(). Undefined behaviour otherwise.
138
146
*/
139
147
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

#ifdef CONFIG_ARM64_HW_AFDBM
/* Hardware DBM clears PTE_RDONLY on write, so !PTE_RDONLY means hw-dirty. */
#define pte_hw_dirty(pte)	(!(pte_val(pte) & PTE_RDONLY))
#else
#define pte_hw_dirty(pte)	(0)
#endif
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

/*
 * Fixed: must be a bitwise AND with PTE_VALID; the previous logical &&
 * reported any non-zero pte as valid.
 */
#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
148
164
#define pte_valid_not_user (pte ) \
@@ -209,20 +225,49 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
209
225
}
210
226
}
211
227
228
+ struct mm_struct ;
229
+ struct vm_area_struct ;
230
+
212
231
extern void __sync_icache_dcache (pte_t pteval , unsigned long addr );
213
232
233
+ /*
234
+ * PTE bits configuration in the presence of hardware Dirty Bit Management
235
+ * (PTE_WRITE == PTE_DBM):
236
+ *
237
+ * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw)
238
+ * 0 0 | 1 0 0
239
+ * 0 1 | 1 1 0
240
+ * 1 0 | 1 0 1
241
+ * 1 1 | 0 1 x
242
+ *
243
+ * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
244
+ * the page fault mechanism. Checking the dirty status of a pte becomes:
245
+ *
246
+ * PTE_DIRTY || !PTE_RDONLY
247
+ */
214
248
static inline void set_pte_at (struct mm_struct * mm , unsigned long addr ,
215
249
pte_t * ptep , pte_t pte )
216
250
{
217
251
if (pte_valid_user (pte )) {
218
252
if (!pte_special (pte ) && pte_exec (pte ))
219
253
__sync_icache_dcache (pte , addr );
220
- if (pte_dirty (pte ) && pte_write (pte ))
254
+ if (pte_sw_dirty (pte ) && pte_write (pte ))
221
255
pte_val (pte ) &= ~PTE_RDONLY ;
222
256
else
223
257
pte_val (pte ) |= PTE_RDONLY ;
224
258
}
225
259
260
+ /*
261
+ * If the existing pte is valid, check for potential race with
262
+ * hardware updates of the pte (ptep_set_access_flags safely changes
263
+ * valid ptes without going through an invalid entry).
264
+ */
265
+ if (IS_ENABLED (CONFIG_DEBUG_VM ) && IS_ENABLED (CONFIG_ARM64_HW_AFDBM ) &&
266
+ pte_valid (* ptep )) {
267
+ BUG_ON (!pte_young (pte ));
268
+ BUG_ON (pte_write (* ptep ) && !pte_dirty (pte ));
269
+ }
270
+
226
271
set_pte (ptep , pte );
227
272
}
228
273
@@ -461,6 +506,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
461
506
{
462
507
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
463
508
PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK ;
509
+ /* preserve the hardware dirty information */
510
+ if (pte_hw_dirty (pte ))
511
+ newprot |= PTE_DIRTY ;
464
512
pte_val (pte ) = (pte_val (pte ) & ~mask ) | (pgprot_val (newprot ) & mask );
465
513
return pte ;
466
514
}
@@ -470,6 +518,101 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
470
518
return pte_pmd (pte_modify (pmd_pte (pmd ), newprot ));
471
519
}
472
520
521
+ #ifdef CONFIG_ARM64_HW_AFDBM
522
+ /*
523
+ * Atomic pte/pmd modifications.
524
+ */
525
+ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
526
+ static inline int ptep_test_and_clear_young (struct vm_area_struct * vma ,
527
+ unsigned long address ,
528
+ pte_t * ptep )
529
+ {
530
+ pteval_t pteval ;
531
+ unsigned int tmp , res ;
532
+
533
+ asm volatile ("// ptep_test_and_clear_young\n"
534
+ " prfm pstl1strm, %2\n"
535
+ "1: ldxr %0, %2\n"
536
+ " ubfx %w3, %w0, %5, #1 // extract PTE_AF (young)\n"
537
+ " and %0, %0, %4 // clear PTE_AF\n"
538
+ " stxr %w1, %0, %2\n"
539
+ " cbnz %w1, 1b\n"
540
+ : "=&r" (pteval ), "=&r" (tmp ), "+Q" (pte_val (* ptep )), "=&r" (res )
541
+ : "L" (~PTE_AF ), "I" (ilog2 (PTE_AF )));
542
+
543
+ return res ;
544
+ }
545
+
546
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
/* A section pmd has the same layout as a pte here, so reuse the pte helper. */
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
555
+
556
+ #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
557
+ static inline pte_t ptep_get_and_clear (struct mm_struct * mm ,
558
+ unsigned long address , pte_t * ptep )
559
+ {
560
+ pteval_t old_pteval ;
561
+ unsigned int tmp ;
562
+
563
+ asm volatile ("// ptep_get_and_clear\n"
564
+ " prfm pstl1strm, %2\n"
565
+ "1: ldxr %0, %2\n"
566
+ " stxr %w1, xzr, %2\n"
567
+ " cbnz %w1, 1b\n"
568
+ : "=&r" (old_pteval ), "=&r" (tmp ), "+Q" (pte_val (* ptep )));
569
+
570
+ return __pte (old_pteval );
571
+ }
572
+
573
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
/* pmd variant: same representation as a pte, so delegate and convert back. */
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
581
+
582
+ /*
583
+ * ptep_set_wrprotect - mark read-only while trasferring potential hardware
584
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
585
+ */
586
+ #define __HAVE_ARCH_PTEP_SET_WRPROTECT
587
+ static inline void ptep_set_wrprotect (struct mm_struct * mm , unsigned long address , pte_t * ptep )
588
+ {
589
+ pteval_t pteval ;
590
+ unsigned long tmp ;
591
+
592
+ asm volatile ("// ptep_set_wrprotect\n"
593
+ " prfm pstl1strm, %2\n"
594
+ "1: ldxr %0, %2\n"
595
+ " tst %0, %4 // check for hw dirty (!PTE_RDONLY)\n"
596
+ " csel %1, %3, xzr, eq // set PTE_DIRTY|PTE_RDONLY if dirty\n"
597
+ " orr %0, %0, %1 // if !dirty, PTE_RDONLY is already set\n"
598
+ " and %0, %0, %5 // clear PTE_WRITE/PTE_DBM\n"
599
+ " stxr %w1, %0, %2\n"
600
+ " cbnz %w1, 1b\n"
601
+ : "=&r" (pteval ), "=&r" (tmp ), "+Q" (pte_val (* ptep ))
602
+ : "r" (PTE_DIRTY |PTE_RDONLY ), "L" (PTE_RDONLY ), "L" (~PTE_WRITE )
603
+ : "cc" );
604
+ }
605
+
606
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
607
+ #define __HAVE_ARCH_PMDP_SET_WRPROTECT
608
+ static inline void pmdp_set_wrprotect (struct mm_struct * mm ,
609
+ unsigned long address , pmd_t * pmdp )
610
+ {
611
+ ptep_set_wrprotect (mm , address , (pte_t * )pmdp );
612
+ }
613
+ #endif
614
+ #endif /* CONFIG_ARM64_HW_AFDBM */
615
+
473
616
extern pgd_t swapper_pg_dir [PTRS_PER_PGD ];
474
617
extern pgd_t idmap_pg_dir [PTRS_PER_PGD ];
475
618
0 commit comments