Skip to content

Commit 07037db

Browse files
RISC-V: Paging and MMU
This patch contains code to manage the RISC-V MMU, including definitions of the page tables and the page walking code. Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 6d60b6e commit 07037db

File tree

8 files changed

+1192
-0
lines changed

8 files changed

+1192
-0
lines changed

arch/riscv/include/asm/mmu_context.h

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
/*
2+
* Copyright (C) 2012 Regents of the University of California
3+
*
4+
* This program is free software; you can redistribute it and/or
5+
* modify it under the terms of the GNU General Public License
6+
* as published by the Free Software Foundation, version 2.
7+
*
8+
* This program is distributed in the hope that it will be useful,
9+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
10+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11+
* GNU General Public License for more details.
12+
*/
13+
14+
#ifndef _ASM_RISCV_MMU_CONTEXT_H
15+
#define _ASM_RISCV_MMU_CONTEXT_H
16+
17+
#include <asm-generic/mm_hooks.h>
18+
19+
#include <linux/mm.h>
20+
#include <linux/sched.h>
21+
#include <asm/tlbflush.h>
22+
23+
/*
 * Hook called when a kernel thread starts using @mm lazily.
 * No per-CPU bookkeeping is needed here, so this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
	struct task_struct *task)
{
}
27+
28+
/* Initialize context-related info for a new mm_struct */
static inline int init_new_context(struct task_struct *task,
	struct mm_struct *mm)
{
	/* No arch-specific context state to allocate; always succeeds. */
	return 0;
}
34+
35+
/*
 * Tear down context-related state for @mm.  Nothing was allocated in
 * init_new_context(), so there is nothing to free.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}
38+
39+
static inline pgd_t *current_pgdir(void)
40+
{
41+
return pfn_to_virt(csr_read(sptbr) & SPTBR_PPN);
42+
}
43+
44+
/* Install @pgd as the active page-table root via the sptbr CSR. */
static inline void set_pgdir(pgd_t *pgd)
{
	unsigned long root_pfn = virt_to_pfn(pgd);

	csr_write(sptbr, root_pfn | SPTBR_MODE);
}
48+
49+
static inline void switch_mm(struct mm_struct *prev,
50+
struct mm_struct *next, struct task_struct *task)
51+
{
52+
if (likely(prev != next)) {
53+
set_pgdir(next->pgd);
54+
local_flush_tlb_all();
55+
}
56+
}
57+
58+
/*
 * Activate @next as the current address space; simply delegates to
 * switch_mm() with no owning task.
 */
static inline void activate_mm(struct mm_struct *prev,
	struct mm_struct *next)
{
	switch_mm(prev, next, NULL);
}
63+
64+
/* Deactivation hook: nothing to release on this architecture. */
static inline void deactivate_mm(struct task_struct *task,
	struct mm_struct *mm)
{
}
68+
69+
#endif /* _ASM_RISCV_MMU_CONTEXT_H */

arch/riscv/include/asm/page.h

Lines changed: 130 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,130 @@
1+
/*
2+
* Copyright (C) 2009 Chen Liqin <[email protected]>
3+
* Copyright (C) 2012 Regents of the University of California
4+
* Copyright (C) 2017 SiFive
5+
* Copyright (C) 2017 XiaojingZhu <[email protected]>
6+
*
7+
* This program is free software; you can redistribute it and/or
8+
* modify it under the terms of the GNU General Public License
9+
* as published by the Free Software Foundation, version 2.
10+
*
11+
* This program is distributed in the hope that it will be useful,
12+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14+
* GNU General Public License for more details.
15+
*/
16+
17+
#ifndef _ASM_RISCV_PAGE_H
18+
#define _ASM_RISCV_PAGE_H
19+
20+
#include <linux/pfn.h>
21+
#include <linux/const.h>
22+
23+
/* 4 KiB base pages. */
#define PAGE_SHIFT	(12)
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using MMU this corresponds to the first free page in
 * physical memory (aligned on a page boundary).
 */
#define PAGE_OFFSET	_AC(CONFIG_PAGE_OFFSET, UL)

/* Size of the kernel virtual region: everything above PAGE_OFFSET. */
#define KERN_VIRT_SIZE	(-PAGE_OFFSET)
35+
36+
#ifndef __ASSEMBLY__

/* Round an address up/down to the next page boundary. */
#define PAGE_UP(addr)	(((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
#define PAGE_DOWN(addr)	((addr)&(~((PAGE_SIZE)-1)))

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr, size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr, size)	_ALIGN_UP(addr, size)

/* Whole-page clear/copy helpers; plain memset/memcpy suffice here. */
#define clear_page(pgaddr)	memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) \
			memcpy((vto), (vfrom), PAGE_SIZE)
54+
55+
/*
 * Use struct definitions to apply C type checking
 */

/* Page Global Directory entry */
typedef struct {
	unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
	unsigned long pte;
} pte_t;

/* Page protection bits */
typedef struct {
	unsigned long pgprot;
} pgprot_t;

/* A page-table page is referred to by its struct page. */
typedef struct page *pgtable_t;

/* Extract the raw word from a typed table entry. */
#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

/* Wrap a raw word into a typed table entry. */
#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })
82+
83+
/*
 * printf format for a raw pte value, sized to the configured word width.
 *
 * Fix: the Kconfig symbol is CONFIG_64BIT (no trailing "S").  The
 * original "#ifdef CONFIG_64BITS" tested a symbol that is never
 * defined, so 64-bit kernels would silently fall through to the
 * 8-digit 32-bit format.
 */
#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif
88+
89+
/* Offset between a linear-map kernel virtual address and its physical
 * address; set up during early boot (defined elsewhere).
 */
extern unsigned long va_pa_offset;
/* PFN of the first page of usable RAM (see pfn_valid/ARCH_PFN_OFFSET). */
extern unsigned long pfn_base;

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/* Linear-map virtual <-> physical address conversion. */
#define __pa(x)		((unsigned long)(x) - va_pa_offset)
#define __va(x)		((void *)((unsigned long) (x) + va_pa_offset))

/* Physical address <-> page frame number. */
#define phys_to_pfn(phys)	(PFN_DOWN(phys))
#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

/* Kernel virtual address <-> page frame number. */
#define virt_to_pfn(vaddr)	(phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))

/* Kernel virtual address <-> struct page. */
#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))

#define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
#define page_to_bus(page)	(page_to_phys(page))
#define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))

/* A pfn is valid iff it lies in [pfn_base, pfn_base + max_mapnr). */
#define pfn_valid(pfn) \
	(((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))

#define ARCH_PFN_OFFSET		(pfn_base)
115+
116+
#endif /* __ASSEMBLY__ */

/* A kernel virtual address is valid iff its pfn is. */
#define virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))

/* Default data VMA flags: read/write, with exec merely permitted. */
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

/* vDSO support */
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA
129+
130+
#endif /* _ASM_RISCV_PAGE_H */

arch/riscv/include/asm/pgalloc.h

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
/*
2+
* Copyright (C) 2009 Chen Liqin <[email protected]>
3+
* Copyright (C) 2012 Regents of the University of California
4+
*
5+
* This program is free software; you can redistribute it and/or
6+
* modify it under the terms of the GNU General Public License
7+
* as published by the Free Software Foundation, version 2.
8+
*
9+
* This program is distributed in the hope that it will be useful,
10+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
11+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12+
* GNU General Public License for more details.
13+
*/
14+
15+
#ifndef _ASM_RISCV_PGALLOC_H
16+
#define _ASM_RISCV_PGALLOC_H
17+
18+
#include <linux/mm.h>
19+
#include <asm/tlb.h>
20+
21+
static inline void pmd_populate_kernel(struct mm_struct *mm,
22+
pmd_t *pmd, pte_t *pte)
23+
{
24+
unsigned long pfn = virt_to_pfn(pte);
25+
26+
set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
27+
}
28+
29+
static inline void pmd_populate(struct mm_struct *mm,
30+
pmd_t *pmd, pgtable_t pte)
31+
{
32+
unsigned long pfn = virt_to_pfn(page_address(pte));
33+
34+
set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
35+
}
36+
37+
#ifndef __PAGETABLE_PMD_FOLDED
38+
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
39+
{
40+
unsigned long pfn = virt_to_pfn(pmd);
41+
42+
set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
43+
}
44+
#endif /* __PAGETABLE_PMD_FOLDED */
45+
46+
#define pmd_pgtable(pmd) pmd_page(pmd)
47+
48+
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
49+
{
50+
pgd_t *pgd;
51+
52+
pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
53+
if (likely(pgd != NULL)) {
54+
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
55+
/* Copy kernel mappings */
56+
memcpy(pgd + USER_PTRS_PER_PGD,
57+
init_mm.pgd + USER_PTRS_PER_PGD,
58+
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
59+
}
60+
return pgd;
61+
}
62+
63+
/* Release a page directory allocated by pgd_alloc(). */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
67+
68+
#ifndef __PAGETABLE_PMD_FOLDED

/* Allocate one zeroed page to serve as a pmd table; NULL on failure. */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)__get_free_page(
		GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}

/* Release a pmd table allocated by pmd_alloc_one(). */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

/* Free a pmd table during mmu_gather teardown. */
#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */
84+
85+
/* Allocate one zeroed page of kernel ptes; NULL on failure. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	return (pte_t *)__get_free_page(
		GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}
91+
92+
static inline struct page *pte_alloc_one(struct mm_struct *mm,
93+
unsigned long address)
94+
{
95+
struct page *pte;
96+
97+
pte = alloc_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
98+
if (likely(pte != NULL))
99+
pgtable_page_ctor(pte);
100+
return pte;
101+
}
102+
103+
/* Release a kernel pte page allocated by pte_alloc_one_kernel(). */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}
107+
108+
/* Run the destructor and free a user pte page from pte_alloc_one(). */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
113+
114+
/* Free a pte page via the mmu_gather machinery, destructing it first. */
#define __pte_free_tlb(tlb, pte, buf)   \
do {                                    \
	pgtable_page_dtor(pte);         \
	tlb_remove_page((tlb), pte);    \
} while (0)
119+
120+
/* No arch-private page-table caches to trim; required stub. */
static inline void check_pgt_cache(void)
{
}
123+
124+
#endif /* _ASM_RISCV_PGALLOC_H */

arch/riscv/include/asm/pgtable-32.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
/*
2+
* Copyright (C) 2012 Regents of the University of California
3+
*
4+
* This program is free software; you can redistribute it and/or
5+
* modify it under the terms of the GNU General Public License
6+
* as published by the Free Software Foundation, version 2.
7+
*
8+
* This program is distributed in the hope that it will be useful,
9+
* but WITHOUT ANY WARRANTY; without even the implied warranty of
10+
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11+
* GNU General Public License for more details.
12+
*/
13+
14+
#ifndef _ASM_RISCV_PGTABLE_32_H
15+
#define _ASM_RISCV_PGTABLE_32_H
16+
17+
#include <asm-generic/pgtable-nopmd.h>
18+
#include <linux/const.h>
19+
20+
/* Size of region mapped by a page global directory */
/* Sv32 two-level paging: each pgd entry covers 1 << 22 = 4 MiB. */
#define PGDIR_SHIFT     22
#define PGDIR_SIZE      (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE - 1))
24+
25+
#endif /* _ASM_RISCV_PGTABLE_32_H */

0 commit comments

Comments
 (0)