Skip to content

Commit d239865

Browse files
wildea01 authored and torvalds committed
ioremap: rework pXd_free_pYd_page() API
The recently merged API for ensuring break-before-make on page-table entries when installing huge mappings in the vmalloc/ioremap region is fairly counter-intuitive, resulting in the arch freeing functions (e.g. pmd_free_pte_page()) being called even on entries that aren't present. This resulted in a minor bug in the arm64 implementation, giving rise to spurious VM_WARN messages.

This patch moves the pXd_present() checks out into the core code, refactoring the callsites at the same time so that we avoid the complex conjunctions when determining whether or not we can put down a huge mapping.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
Reviewed-by: Toshi Kani <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: Chintan Pandya <[email protected]>
Cc: Toshi Kani <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Sean Christopherson <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent c16eb00 commit d239865

File tree

1 file changed

+42
-14
lines changed

1 file changed

+42
-14
lines changed

lib/ioremap.c

Lines changed: 42 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,25 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
7676
return 0;
7777
}
7878

79+
static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
80+
unsigned long end, phys_addr_t phys_addr,
81+
pgprot_t prot)
82+
{
83+
if (!ioremap_pmd_enabled())
84+
return 0;
85+
86+
if ((end - addr) != PMD_SIZE)
87+
return 0;
88+
89+
if (!IS_ALIGNED(phys_addr, PMD_SIZE))
90+
return 0;
91+
92+
if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
93+
return 0;
94+
95+
return pmd_set_huge(pmd, phys_addr, prot);
96+
}
97+
7998
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
8099
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
81100
{
@@ -89,20 +108,34 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
89108
do {
90109
next = pmd_addr_end(addr, end);
91110

92-
if (ioremap_pmd_enabled() &&
93-
((next - addr) == PMD_SIZE) &&
94-
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
95-
pmd_free_pte_page(pmd, addr)) {
96-
if (pmd_set_huge(pmd, phys_addr + addr, prot))
97-
continue;
98-
}
111+
if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr + addr, prot))
112+
continue;
99113

100114
if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
101115
return -ENOMEM;
102116
} while (pmd++, addr = next, addr != end);
103117
return 0;
104118
}
105119

120+
static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
121+
unsigned long end, phys_addr_t phys_addr,
122+
pgprot_t prot)
123+
{
124+
if (!ioremap_pud_enabled())
125+
return 0;
126+
127+
if ((end - addr) != PUD_SIZE)
128+
return 0;
129+
130+
if (!IS_ALIGNED(phys_addr, PUD_SIZE))
131+
return 0;
132+
133+
if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
134+
return 0;
135+
136+
return pud_set_huge(pud, phys_addr, prot);
137+
}
138+
106139
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
107140
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
108141
{
@@ -116,13 +149,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
116149
do {
117150
next = pud_addr_end(addr, end);
118151

119-
if (ioremap_pud_enabled() &&
120-
((next - addr) == PUD_SIZE) &&
121-
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
122-
pud_free_pmd_page(pud, addr)) {
123-
if (pud_set_huge(pud, phys_addr + addr, prot))
124-
continue;
125-
}
152+
if (ioremap_try_huge_pud(pud, addr, next, phys_addr + addr, prot))
153+
continue;
126154

127155
if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
128156
return -ENOMEM;

0 commit comments

Comments
 (0)