
Commit 803b0fc

LoongArch: Add process management
Add process management support for LoongArch, including: thread info
definition, context switch and process tracing.

Reviewed-by: WANG Xuerui <[email protected]>
Reviewed-by: Jiaxun Yang <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
1 parent 0603839 commit 803b0fc

14 files changed, +1865 -0 lines changed

arch/loongarch/include/asm/fpu.h

Lines changed: 129 additions & 0 deletions
@@ -0,0 +1,129 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Author: Huacai Chen <[email protected]>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_FPU_H
#define _ASM_FPU_H

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>

#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/current.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

struct sigcontext;

extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);

/*
 * Mask the FCSR Cause bits according to the Enable bits, observing
 * that Unimplemented is always enabled.
 */
static inline unsigned long mask_fcsr_x(unsigned long fcsr)
{
        return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
                       (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}

static inline int is_fp_enabled(void)
{
        return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
                1 : 0;
}

#define enable_fpu()            set_csr_euen(CSR_EUEN_FPEN)

#define disable_fpu()           clear_csr_euen(CSR_EUEN_FPEN)

#define clear_fpu_owner()       clear_thread_flag(TIF_USEDFPU)

static inline int is_fpu_owner(void)
{
        return test_thread_flag(TIF_USEDFPU);
}

static inline void __own_fpu(void)
{
        enable_fpu();
        set_thread_flag(TIF_USEDFPU);
        KSTK_EUEN(current) |= CSR_EUEN_FPEN;
}

static inline void own_fpu_inatomic(int restore)
{
        if (cpu_has_fpu && !is_fpu_owner()) {
                __own_fpu();
                if (restore)
                        _restore_fp(&current->thread.fpu);
        }
}

static inline void own_fpu(int restore)
{
        preempt_disable();
        own_fpu_inatomic(restore);
        preempt_enable();
}

static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
        if (is_fpu_owner()) {
                if (save)
                        _save_fp(&tsk->thread.fpu);
                disable_fpu();
                clear_tsk_thread_flag(tsk, TIF_USEDFPU);
        }
        KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
}

static inline void lose_fpu(int save)
{
        preempt_disable();
        lose_fpu_inatomic(save, current);
        preempt_enable();
}

static inline void init_fpu(void)
{
        unsigned int fcsr = current->thread.fpu.fcsr;

        __own_fpu();
        _init_fpu(fcsr);
        set_used_math();
}

static inline void save_fp(struct task_struct *tsk)
{
        if (cpu_has_fpu)
                _save_fp(&tsk->thread.fpu);
}

static inline void restore_fp(struct task_struct *tsk)
{
        if (cpu_has_fpu)
                _restore_fp(&tsk->thread.fpu);
}

static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
        if (tsk == current) {
                preempt_disable();
                if (is_fpu_owner())
                        _save_fp(&current->thread.fpu);
                preempt_enable();
        }

        return tsk->thread.fpu.fpr;
}

#endif /* _ASM_FPU_H */
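
The mask_fcsr_x() helper above determines which raised FCSR Cause bits are actually enabled to trap, by shifting the Enable field up to the position of the Cause field before masking. The following stand-alone user-space sketch reproduces that masking with assumed bit layouts (Enables in bits 0-4, Causes in bits 24-28); the real constants come from asm/loongarch.h and these values are only illustrative.

/* Illustrative sketch only, not kernel code.  The constant values below
 * are assumptions made for the demonstration. */
#include <stdio.h>
#include <strings.h>    /* ffs() */

#define FPU_CSR_ALL_E 0x0000001fUL   /* assumed: exception Enable bits  */
#define FPU_CSR_ALL_X 0x1f000000UL   /* assumed: exception Cause bits   */

static unsigned long mask_fcsr_x(unsigned long fcsr)
{
        /* Shift the Enable field up to the Cause field and intersect,
         * keeping only Cause bits whose corresponding Enable is set. */
        return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
                       (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}

int main(void)
{
        /* Cause bit 26 is raised, but only enable bit 0 is set, so the
         * masked result is 0: no enabled exception is pending. */
        unsigned long fcsr = (1UL << 26) | (1UL << 0);

        printf("masked cause bits: %#lx\n", mask_fcsr_x(fcsr));
        return 0;
}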

arch/loongarch/include/asm/idle.h

Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_IDLE_H
#define __ASM_IDLE_H

#include <linux/linkage.h>

extern asmlinkage void __arch_cpu_idle(void);

#endif /* __ASM_IDLE_H */

arch/loongarch/include/asm/mmu.h

Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <linux/atomic.h>
#include <linux/spinlock.h>

typedef struct {
        u64 asid[NR_CPUS];
        void *vdso;
} mm_context_t;

#endif /* __ASM_MMU_H */
arch/loongarch/include/asm/mmu_context.h

Lines changed: 152 additions & 0 deletions
@@ -0,0 +1,152 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Switch a MMU context.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

/*
 * All unused by hardware upper bits will be considered
 * as a software asid extension.
 */
static inline u64 asid_version_mask(unsigned int cpu)
{
        return ~(u64)(cpu_asid_mask(&cpu_data[cpu]));
}

static inline u64 asid_first_version(unsigned int cpu)
{
        return cpu_asid_mask(&cpu_data[cpu]) + 1;
}

#define cpu_context(cpu, mm)    ((mm)->context.asid[cpu])
#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm)       (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

static inline int asid_valid(struct mm_struct *mm, unsigned int cpu)
{
        if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu))
                return 0;

        return 1;
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/* Normal, classic get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
        u64 asid = asid_cache(cpu);

        if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
                local_flush_tlb_user();  /* start new asid cycle */

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;

        return 0;
}

static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                                      struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

        /* Check if our ASID is of an older version and thus invalid */
        if (!asid_valid(next, cpu))
                get_new_mmu_context(next, cpu);

        write_csr_asid(cpu_asid(cpu, next));

        if (next != &init_mm)
                csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
        else
                csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);

        /*
         * Mark current->active_mm as not "active" anymore.
         * We don't want to mislead possible IPI tlb flush routines.
         */
        cpumask_set_cpu(cpu, mm_cpumask(next));
}

#define switch_mm_irqs_off switch_mm_irqs_off

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned long flags;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

#define activate_mm(prev, next)         switch_mm(prev, next, current)
#define deactivate_mm(task, mm)         do { } while (0)

/*
 * If mm is currently active, we can't really drop it.
 * Instead, we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        int asid;
        unsigned long flags;

        local_irq_save(flags);

        asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);

        if (asid == cpu_asid(cpu, mm)) {
                if (!current->mm || (current->mm == mm)) {
                        get_new_mmu_context(mm, cpu);
                        write_csr_asid(cpu_asid(cpu, mm));
                        goto out;
                }
        }

        /* Will get a new context next time */
        cpu_context(cpu, mm) = 0;
        cpumask_clear_cpu(cpu, mm_cpumask(mm));
out:
        local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */
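
asid_valid() and get_new_mmu_context() above implement a versioned-ASID scheme: the hardware ASID occupies the low bits, the bits above the hardware mask act as a generation counter, a context whose generation no longer matches asid_cache must be given a fresh ASID, and rollover of the low bits flushes the TLB and starts a new generation. The stand-alone sketch below simulates that logic with an assumed 10-bit hardware ASID; the width and names are illustrative, not taken from the kernel sources.

/* Illustrative user-space simulation only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define HW_ASID_MASK 0x3ffULL                   /* assumed 10-bit hardware ASID */

static uint64_t asid_cache = HW_ASID_MASK + 1;  /* plays the role of asid_first_version() */

/* Mirrors get_new_mmu_context(): bump the per-CPU cache, flush on rollover. */
static uint64_t get_new_context(void)
{
        if (!(++asid_cache & HW_ASID_MASK))
                printf("ASID space exhausted: flush TLB, start new generation\n");
        return asid_cache;
}

/* Mirrors asid_valid(): a context is stale if its generation bits differ. */
static int context_valid(uint64_t ctx)
{
        return !((ctx ^ asid_cache) & ~HW_ASID_MASK);
}

int main(void)
{
        uint64_t mm_ctx = 0;                    /* as set up by init_new_context() */

        if (!context_valid(mm_ctx))             /* stale, so allocate a new ASID */
                mm_ctx = get_new_context();

        printf("hw asid = %llu, generation = %llu\n",
               (unsigned long long)(mm_ctx & HW_ASID_MASK),
               (unsigned long long)(mm_ctx >> 10));
        return 0;
}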
