@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
 	unsigned int	cr_mask;
 	pmdval_t	pmd;
 	pteval_t	pte;
+	pteval_t	pte_s2;
 };
 
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)	policy
+#else
+#define s2_policy(policy)	0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
 	{
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
 		.pte		= L_PTE_MT_UNCACHED,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
 		.pte		= L_PTE_MT_BUFFERABLE,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
 		.pte		= L_PTE_MT_WRITETHROUGH,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
 		.pte		= L_PTE_MT_WRITEBACK,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
 		.pte		= L_PTE_MT_WRITEALLOC,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}
 };
 
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
 	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+	s2_pgprot = cp->pte_s2;
+	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
+		s2_pgprot |= L_PTE_SHARED;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | kern_pgprot);
+	pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+	pgprot_s2_device = __pgprot(s2_device_pgprot);
+	pgprot_hyp_device = __pgprot(hyp_device_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
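
For context, here is a minimal sketch (not part of this patch) of how a caller might consume the stage-2 protection values exported above when building a guest mapping. The helper name and its is_device parameter are hypothetical; only pgprot_s2, pgprot_s2_device, pfn_pte() and pgprot_t come from existing kernel code.

/*
 * Hypothetical illustration only: choose between the normal-memory and
 * device stage-2 protections set up in build_mem_type_table() and fold
 * them into a page table entry for a given page frame number.
 */
static pte_t example_build_s2_pte(unsigned long pfn, bool is_device)
{
	pgprot_t prot = is_device ? pgprot_s2_device : pgprot_s2;

	/* pfn_pte() combines a page frame number with protection bits. */
	return pfn_pte(pfn, prot);
}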