Commit 09cfefb

LoongArch: Add memory management
Add memory management support for LoongArch, including: cache and tlb
management, page fault handling and ioremap/mmap support.

Reviewed-by: WANG Xuerui <[email protected]>
Reviewed-by: Jiaxun Yang <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
1 parent 803b0fc commit 09cfefb

26 files changed, +3172 -0 lines changed

arch/loongarch/include/asm/cache.h

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H

#define L1_CACHE_SHIFT		CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define __read_mostly __section(".data..read_mostly")

#endif /* _ASM_CACHE_H */
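
Note that L1_CACHE_SHIFT comes from Kconfig rather than being hard-coded. Assuming a Kconfig value of 6 (an assumption for illustration; the Kconfig default is not part of this hunk), the indirection resolves to 64-byte cache lines, which a compile-time check can demonstrate:

/* Hypothetical standalone sketch; CONFIG_L1_CACHE_SHIFT = 6 is assumed
 * here, not taken from this diff. */
#define CONFIG_L1_CACHE_SHIFT	6	/* assumed Kconfig value */
#define L1_CACHE_SHIFT		CONFIG_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

_Static_assert(L1_CACHE_BYTES == 64, "expected 64-byte L1 cache lines");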
arch/loongarch/include/asm/cacheflush.h

Lines changed: 80 additions & 0 deletions
@@ -0,0 +1,80 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cpu-features.h>
#include <asm/cacheops.h>

extern void local_flush_icache_range(unsigned long start, unsigned long end);

#define flush_icache_range	local_flush_icache_range
#define flush_icache_user_range	local_flush_icache_range

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0

#define flush_cache_all()				do { } while (0)
#define flush_cache_mm(mm)				do { } while (0)
#define flush_cache_dup_mm(mm)				do { } while (0)
#define flush_cache_range(vma, start, end)		do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
#define flush_cache_vmap(start, end)			do { } while (0)
#define flush_cache_vunmap(start, end)			do { } while (0)
#define flush_icache_page(vma, page)			do { } while (0)
#define flush_icache_user_page(vma, page, addr, len)	do { } while (0)
#define flush_dcache_page(page)				do { } while (0)
#define flush_dcache_mmap_lock(mapping)			do { } while (0)
#define flush_dcache_mmap_unlock(mapping)		do { } while (0)

#define cache_op(op, addr)					\
	__asm__ __volatile__(					\
	"	cacop	%0, %1\n"				\
	:							\
	: "i" (op), "ZC" (*(unsigned char *)(addr)))

static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_vcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_V, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_S, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_I, addr);
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void flush_vcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_V, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_S, addr);
}

#include <asm-generic/cacheflush.h>

#endif /* _ASM_CACHEFLUSH_H */
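
The generic flush_cache_*() hooks are all stubbed out as no-ops here; the real work happens at line granularity through cache_op(), which emits a single cacop instruction with the operation code as an immediate ("i" constraint) and the target supplied through the "ZC" memory-operand constraint. The Hit_* helpers act on the line containing a virtual address, while the Index_* helpers select a line by index. As a purely illustrative sketch (not code from this commit, and the kernel's own local_flush_icache_range() need not work this way), a routine that writes back a buffer line by line might look like:

#include <asm/cache.h>		/* L1_CACHE_BYTES (kernel build context assumed) */
#include <asm/cacheflush.h>	/* flush_dcache_line() from the header above */

/* Hypothetical helper, not from this commit: write back and invalidate
 * every D-cache line covering [start, end). */
static inline void wb_inv_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start & ~(L1_CACHE_BYTES - 1UL); addr < end;
	     addr += L1_CACHE_BYTES)
		flush_dcache_line(addr);	/* Hit_Writeback_Inv_D per line */
}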

arch/loongarch/include/asm/cacheops.h

Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Cache operations for the cache instruction.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_CACHEOPS_H
#define __ASM_CACHEOPS_H

/*
 * Most cache ops are split into a 2 bit field identifying the cache, and a 3
 * bit field identifying the cache operation.
 */
#define CacheOp_Cache			0x03
#define CacheOp_Op			0x1c

#define Cache_I				0x00
#define Cache_D				0x01
#define Cache_V				0x02
#define Cache_S				0x03

#define Index_Invalidate		0x08
#define Index_Writeback_Inv		0x08
#define Hit_Invalidate			0x10
#define Hit_Writeback_Inv		0x10
#define CacheOp_User_Defined		0x18

#define Index_Invalidate_I		(Cache_I | Index_Invalidate)
#define Index_Writeback_Inv_D		(Cache_D | Index_Writeback_Inv)
#define Index_Writeback_Inv_V		(Cache_V | Index_Writeback_Inv)
#define Index_Writeback_Inv_S		(Cache_S | Index_Writeback_Inv)
#define Hit_Invalidate_I		(Cache_I | Hit_Invalidate)
#define Hit_Writeback_Inv_D		(Cache_D | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_V		(Cache_V | Hit_Writeback_Inv)
#define Hit_Writeback_Inv_S		(Cache_S | Hit_Writeback_Inv)

#endif /* __ASM_CACHEOPS_H */
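
Concretely, the low two bits select the cache and bits [4:2] select the operation, so Hit_Writeback_Inv_D = Cache_D | Hit_Writeback_Inv = 0x01 | 0x10 = 0x11. A small user-space sketch of the decoding (hypothetical; it redefines the two masks rather than including the kernel header):

#include <stdio.h>

#define CacheOp_Cache	0x03	/* cache selector: bits [1:0] */
#define CacheOp_Op	0x1c	/* operation: bits [4:2] */

int main(void)
{
	unsigned int op = 0x11;	/* Hit_Writeback_Inv_D */

	printf("cache=%u op=0x%02x\n", op & CacheOp_Cache, op & CacheOp_Op);
	/* prints: cache=1 op=0x10 (D-cache, hit writeback-invalidate) */
	return 0;
}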

arch/loongarch/include/asm/fixmap.h

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fixmap.h: compile-time virtual memory allocation
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H

#define NR_FIX_BTMAPS 64

#endif

arch/loongarch/include/asm/hugetlb.h

Lines changed: 83 additions & 0 deletions
@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>

uint64_t pmd_to_entrylo(unsigned long pmd_val);

#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr,
					 unsigned long len)
{
	unsigned long task_size = STACK_TOP;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;
	if (task_size - len < addr)
		return -EINVAL;
	return 0;
}

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t clear;
	pte_t pte = *ptep;

	pte_val(clear) = (unsigned long)invalid_pte_table;
	set_pte_at(mm, addr, ptep, clear);
	return pte;
}

#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
	return pte;
}

#define __HAVE_ARCH_HUGE_PTE_NONE
static inline int huge_pte_none(pte_t pte)
{
	unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
	return !val || (val == (unsigned long)invalid_pte_table);
}

#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte,
					     int dirty)
{
	int changed = !pte_same(*ptep, pte);

	if (changed) {
		set_pte_at(vma->vm_mm, addr, ptep, pte);
		/*
		 * There could be some standard sized pages in there,
		 * get them all.
		 */
		flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
	}
	return changed;
}

#include <asm-generic/hugetlb.h>

#endif /* __ASM_HUGETLB_H */
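
Note the asymmetry here: huge_ptep_get_and_clear() does not zero the entry, it points it back at invalid_pte_table, which is why huge_pte_none() has to accept two "empty" encodings after masking off the global bit. A tiny user-space model of that check (hypothetical; _PAGE_GLOBAL's bit position is assumed for illustration, not taken from this diff):

#include <stdbool.h>
#include <stdio.h>

#define _PAGE_GLOBAL	(1UL << 6)	/* assumed bit position, illustration only */

static unsigned long invalid_pte_table[1];	/* stand-in for the kernel's table */

static bool model_huge_pte_none(unsigned long pteval)
{
	unsigned long val = pteval & ~_PAGE_GLOBAL;

	return !val || (val == (unsigned long)invalid_pte_table);
}

int main(void)
{
	/* Both the all-zero encoding and a cleared entry pointing at
	 * invalid_pte_table read back as "none". */
	printf("%d %d\n", model_huge_pte_none(0),
	       model_huge_pte_none((unsigned long)invalid_pte_table));
	return 0;
}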

arch/loongarch/include/asm/page.h

Lines changed: 115 additions & 0 deletions
@@ -0,0 +1,115 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>

/*
 * PAGE_SHIFT determines the page size
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT	12
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT	14
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT	16
#endif
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/pfn.h>

#define MAX_DMA32_PFN	(1UL << (32 - PAGE_SHIFT))

/*
 * It's normally defined only for FLATMEM config but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)

extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

extern unsigned long shm_align_mask;

struct page;
struct vm_area_struct;
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
typedef struct page *pgtable_t;

typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)

#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

/*
 * __pa()/__va() should be used only during mem init.
 */
#define __pa(x)		PHYSADDR(x)
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#ifdef CONFIG_FLATMEM

static inline int pfn_valid(unsigned long pfn)
{
	/* avoid <linux/mm.h> include hell */
	extern unsigned long max_mapnr;
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && pfn < max_mapnr;
}

#endif

#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))

extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr)	__virt_addr_valid((volatile void *)(kaddr))

#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PAGE_H */
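
The HPAGE_SHIFT formula encodes the page-table geometry: a page-table page holds PAGE_SIZE / sizeof(pte_t) = 2^(PAGE_SHIFT - 3) entries, and one huge page covers exactly what a single PMD entry maps, hence HPAGE_SHIFT = PAGE_SHIFT + (PAGE_SHIFT - 3). A standalone arithmetic check of the three page-size configurations (a sketch, not kernel code):

/* Standalone check of HPAGE_SHIFT = 2 * PAGE_SHIFT - 3 for each config. */
#define MODEL_HPAGE_SHIFT(ps)	((ps) + (ps) - 3)

_Static_assert(MODEL_HPAGE_SHIFT(12) == 21, "4KB pages  ->   2MB huge pages");
_Static_assert(MODEL_HPAGE_SHIFT(14) == 25, "16KB pages ->  32MB huge pages");
_Static_assert(MODEL_HPAGE_SHIFT(16) == 29, "64KB pages -> 512MB huge pages");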
