
Commit 298fa1a

highmem: Provide generic variant of kmap_atomic*
The kmap_atomic* interfaces in all architectures are pretty much the same
except for the post map operations (flush) and the pre- and post unmap
operations.

Provide a generic variant for that.

Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Andrew Morton <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 4f8b96c commit 298fa1a
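
Per the message above, the architecture-specific parts are reduced to optional hooks: arch_kmap_local_post_map(), arch_kmap_local_pre_unmap(), arch_kmap_local_post_unmap(), arch_kmap_local_map_idx(), arch_kmap_local_unmap_idx() and arch_kmap_local_high_get(). Anything an architecture does not define falls back to a no-op in mm/highmem.c below. A hypothetical opt-in header might look like this (arch/foo and the foo_flush_*() helpers are made-up stand-ins for an architecture's real primitives):

/* arch/foo/include/asm/highmem.h (hypothetical sketch) */
#define arch_kmap_local_post_map(vaddr, pteval)	\
	foo_flush_tlb_kernel_page(vaddr)	/* flush TLB after map */
#define arch_kmap_local_pre_unmap(vaddr)	\
	foo_flush_cache_page(vaddr)		/* writeback before unmap */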

3 files changed, +211 -18 lines

include/linux/highmem.h

Lines changed: 66 additions & 16 deletions
@@ -31,9 +31,16 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
 
 #include <asm/kmap_types.h>
 
+/*
+ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+ */
+#ifdef CONFIG_KMAP_LOCAL
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void kunmap_local_indexed(void *vaddr);
+#endif
+
 #ifdef CONFIG_HIGHMEM
-extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
-extern void kunmap_atomic_high(void *kvaddr);
 #include <asm/highmem.h>
 
 #ifndef ARCH_HAS_KMAP_FLUSH_TLB
@@ -81,6 +88,11 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
+
+#ifndef CONFIG_KMAP_LOCAL
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+void kunmap_atomic_high(void *kvaddr);
+
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	preempt_disable();
@@ -89,7 +101,38 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 		return page_address(page);
 	return kmap_atomic_high_prot(page, prot);
 }
-#define kmap_atomic(page)	kmap_atomic_prot(page, kmap_prot)
+
+static inline void __kunmap_atomic(void *vaddr)
+{
+	kunmap_atomic_high(vaddr);
+}
+#else /* !CONFIG_KMAP_LOCAL */
+
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	return __kmap_local_page_prot(page, prot);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+	preempt_disable();
+	pagefault_disable();
+	return __kmap_local_pfn_prot(pfn, kmap_prot);
+}
+
+static inline void __kunmap_atomic(void *addr)
+{
+	kunmap_local_indexed(addr);
+}
+
+#endif /* CONFIG_KMAP_LOCAL */
+
+static inline void *kmap_atomic(struct page *page)
+{
+	return kmap_atomic_prot(page, kmap_prot);
+}
 
 /* declarations for linux/mm/highmem.c */
 unsigned int nr_free_highpages(void);
@@ -147,25 +190,33 @@ static inline void *kmap_atomic(struct page *page)
 	pagefault_disable();
 	return page_address(page);
 }
-#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
 
-static inline void kunmap_atomic_high(void *addr)
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	return kmap_atomic(page);
+}
+
+static inline void *kmap_atomic_pfn(unsigned long pfn)
+{
+	return kmap_atomic(pfn_to_page(pfn));
+}
+
+static inline void __kunmap_atomic(void *addr)
 {
 	/*
 	 * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-	 * handles re-enabling faults + preemption
+	 * handles re-enabling faults and preemption
 	 */
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(addr);
 #endif
 }
 
-#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
-
 #define kmap_flush_unused()	do {} while(0)
 
 #endif /* CONFIG_HIGHMEM */
 
+#if !defined(CONFIG_KMAP_LOCAL)
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 
 DECLARE_PER_CPU(int, __kmap_atomic_idx);
@@ -196,22 +247,21 @@ static inline void kmap_atomic_idx_pop(void)
 	__this_cpu_dec(__kmap_atomic_idx);
 #endif
 }
-
+#endif
 #endif
 
 /*
  * Prevent people trying to call kunmap_atomic() as if it were kunmap()
  * kunmap_atomic() should get the return value of kmap_atomic, not the page.
  */
-#define kunmap_atomic(addr)					\
-do {								\
-	BUILD_BUG_ON(__same_type((addr), struct page *));	\
-	kunmap_atomic_high(addr);				\
-	pagefault_enable();					\
-	preempt_enable();					\
+#define kunmap_atomic(__addr)					\
+do {								\
+	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
+	__kunmap_atomic(__addr);				\
+	pagefault_enable();					\
+	preempt_enable();					\
 } while (0)
 
-
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 #ifndef clear_user_highpage
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
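
Callers are unaffected: kunmap_atomic() must still be handed the address returned by kmap_atomic(), never the struct page, which the BUILD_BUG_ON() above enforces, and the fault/preemption re-enabling stays in the macro. A minimal usage sketch, not part of the patch:

/* Clear a possibly-highmem page inside a short non-sleeping section. */
static void clear_page_example(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);
}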

mm/Kconfig

Lines changed: 3 additions & 0 deletions
@@ -872,4 +872,7 @@ config ARCH_HAS_HUGEPD
 config MAPPING_DIRTY_HELPERS
 	bool
 
+config KMAP_LOCAL
+	bool
+
 endmenu
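
KMAP_LOCAL is an invisible bool, so architectures opt in via select rather than user configuration. A hypothetical arch Kconfig entry (FOO and arch/foo are made-up names):

# arch/foo/Kconfig (hypothetical sketch)
config FOO
	def_bool y
	select KMAP_LOCAL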

mm/highmem.c

Lines changed: 142 additions & 2 deletions
@@ -31,9 +31,11 @@
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
+#ifndef CONFIG_KMAP_LOCAL
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 DEFINE_PER_CPU(int, __kmap_atomic_idx);
 #endif
+#endif
 
 /*
  * Virtual_count is not a pure "count".
@@ -365,9 +367,147 @@ void kunmap_high(struct page *page)
 	if (need_wakeup)
 		wake_up(pkmap_map_wait);
 }
-
 EXPORT_SYMBOL(kunmap_high);
-#endif	/* CONFIG_HIGHMEM */
+#endif /* CONFIG_HIGHMEM */
+
+#ifdef CONFIG_KMAP_LOCAL
+
+#include <asm/kmap_size.h>
+
+static DEFINE_PER_CPU(int, __kmap_local_idx);
+
+static inline int kmap_local_idx_push(void)
+{
+	int idx = __this_cpu_inc_return(__kmap_local_idx) - 1;
+
+	WARN_ON_ONCE(in_irq() && !irqs_disabled());
+	BUG_ON(idx >= KM_MAX_IDX);
+	return idx;
+}
+
+static inline int kmap_local_idx(void)
+{
+	return __this_cpu_read(__kmap_local_idx) - 1;
+}
+
+static inline void kmap_local_idx_pop(void)
+{
+	int idx = __this_cpu_dec_return(__kmap_local_idx);
+
+	BUG_ON(idx < 0);
+}
+
+#ifndef arch_kmap_local_post_map
+# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
+#endif
+#ifndef arch_kmap_local_pre_unmap
+# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
+#endif
+
+#ifndef arch_kmap_local_post_unmap
+# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
+#endif
+
+#ifndef arch_kmap_local_map_idx
+#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
+#endif
+
+#ifndef arch_kmap_local_unmap_idx
+#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
+#endif
+
+#ifndef arch_kmap_local_high_get
+static inline void *arch_kmap_local_high_get(struct page *page)
+{
+	return NULL;
+}
+#endif
+
+/* Unmap a local mapping which was obtained by kmap_high_get() */
+static inline void kmap_high_unmap_local(unsigned long vaddr)
+{
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
+		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+#endif
+}
+
+static inline int kmap_local_calc_idx(int idx)
+{
+	return idx + KM_MAX_IDX * smp_processor_id();
+}
+
+static pte_t *__kmap_pte;
+
+static pte_t *kmap_get_pte(void)
+{
+	if (!__kmap_pte)
+		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+	return __kmap_pte;
+}
+
+void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	pte_t pteval, *kmap_pte = kmap_get_pte();
+	unsigned long vaddr;
+	int idx;
+
+	preempt_disable();
+	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	BUG_ON(!pte_none(*(kmap_pte - idx)));
+	pteval = pfn_pte(pfn, prot);
+	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+	arch_kmap_local_post_map(vaddr, pteval);
+	preempt_enable();
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
+
+void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
+{
+	void *kmap;
+
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	/* Try kmap_high_get() if architecture has it enabled */
+	kmap = arch_kmap_local_high_get(page);
+	if (kmap)
+		return kmap;
+
+	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
+}
+EXPORT_SYMBOL(__kmap_local_page_prot);
+
+void kunmap_local_indexed(void *vaddr)
+{
+	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
+	pte_t *kmap_pte = kmap_get_pte();
+	int idx;
+
+	if (addr < __fix_to_virt(FIX_KMAP_END) ||
+	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
+		WARN_ON_ONCE(addr < PAGE_OFFSET);
+
+		/* Handle mappings which were obtained by kmap_high_get() */
+		kmap_high_unmap_local(addr);
+		return;
+	}
+
+	preempt_disable();
+	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
+	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+	arch_kmap_local_pre_unmap(addr);
+	pte_clear(&init_mm, addr, kmap_pte - idx);
+	arch_kmap_local_post_unmap(addr);
+	kmap_local_idx_pop();
+	preempt_enable();
+}
+EXPORT_SYMBOL(kunmap_local_indexed);
+#endif
 
 #if defined(HASHED_PAGE_VIRTUAL)
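For illustration, the slot arithmetic in kmap_local_calc_idx() above stacks per-CPU mappings into disjoint fixmap ranges; the worked numbers assume KM_MAX_IDX == 16, which is only an example value (the real constant comes from the architecture's asm/kmap_size.h):

/*
 * idx = depth + KM_MAX_IDX * smp_processor_id(), e.g. with KM_MAX_IDX == 16:
 *
 *   CPU 0, first mapping:   0 + 16 * 0 =  0
 *   CPU 0, nested mapping:  1 + 16 * 0 =  1
 *   CPU 2, first mapping:   0 + 16 * 2 = 32
 *
 * The slot gives vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); unmaps must
 * come in LIFO order, which the WARN_ON_ONCE() on the recomputed address
 * in kunmap_local_indexed() catches.
 */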
