Commit ce4a4e5
amluto authored and Ingo Molnar committed
x86/mm: Remove the UP asm/tlbflush.h code, always use the (formerly) SMP code
The UP asm/tlbflush.h generates somewhat nicer code than the SMP version. Aside from that, it's fallen quite a bit behind the SMP code: - flush_tlb_mm_range() didn't flush individual pages if the range was small. - The lazy TLB code was much weaker. This usually wouldn't matter, but, if a kernel thread flushed its lazy "active_mm" more than once (due to reclaim or similar), it wouldn't be unlazied and would instead pointlessly flush repeatedly. - Tracepoints were missing. Aside from that, simply having the UP code around was a maintanence burden, since it means that any change to the TLB flush code had to make sure not to break it. Simplify everything by deleting the UP code. Signed-off-by: Andy Lutomirski <[email protected]> Cc: Andrew Morton <[email protected]> Cc: Arjan van de Ven <[email protected]> Cc: Borislav Petkov <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Linus Torvalds <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Nadav Amit <[email protected]> Cc: Nadav Amit <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: [email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent 3f79e4c commit ce4a4e5
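To make the changelog's first point concrete — the deleted UP path never flushed individual pages — here is a hedged sketch of the decision the surviving (formerly SMP-only) flush path makes for a small range. flush_range_sketch() is a hypothetical illustration, not the kernel's actual flush_tlb_mm_range(); tlb_single_page_flush_ceiling, __flush_tlb_single() and local_flush_tlb() are the real identifiers it borrows from arch/x86/mm/tlb.c and asm/tlbflush.h.

/*
 * Hedged sketch, not the verbatim kernel code: for a small range the
 * (formerly SMP-only) path invalidates page by page; for a large one
 * it falls back to a full local flush.  Remote CPUs are notified
 * separately via flush_tlb_others().
 */
static void flush_range_sketch(unsigned long start, unsigned long end)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	unsigned long addr;

	if (nr_pages <= tlb_single_page_flush_ceiling) {
		/* Small range: one INVLPG per page. */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);
	} else {
		/* Large range: cheaper to drop the whole TLB. */
		local_flush_tlb();
	}
}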

8 files changed: +5 −104 lines changed


arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ config X86
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
-	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
 	select BUILDTIME_EXTABLE_SORT

arch/x86/include/asm/hardirq.h

Lines changed: 1 addition & 1 deletion
@@ -22,8 +22,8 @@ typedef struct {
 #ifdef CONFIG_SMP
 	unsigned int irq_resched_count;
 	unsigned int irq_call_count;
-	unsigned int irq_tlb_count;
 #endif
+	unsigned int irq_tlb_count;
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	unsigned int irq_thermal_count;
 #endif

arch/x86/include/asm/mmu.h

Lines changed: 0 additions & 6 deletions
@@ -37,12 +37,6 @@ typedef struct {
 #endif
 } mm_context_t;
 
-#ifdef CONFIG_SMP
 void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif
 
 #endif /* _ASM_X86_MMU_H */

arch/x86/include/asm/mmu_context.h

Lines changed: 0 additions & 2 deletions
@@ -99,10 +99,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-#ifdef CONFIG_SMP
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
 }
 
 static inline int init_new_context(struct task_struct *tsk,
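As a reminder of how the TLBSTATE_LAZY value set by enter_lazy_tlb() above is consumed, here is a hedged sketch of the flush-request side; lazy_flush_sketch() is hypothetical, but leave_mm(), cpu_tlbstate.state and TLBSTATE_OK are the real identifiers touched elsewhere in this commit. The deleted UP code had no such unlazying step, which is the changelog's second point.

/*
 * Hedged sketch, not the actual IPI handler: a CPU that is lazily
 * borrowing an mm (TLBSTATE_LAZY) drops it via leave_mm() instead of
 * flushing, so later flushes of that mm no longer target this CPU.
 */
static void lazy_flush_sketch(void)
{
	if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
		/* Lazy mode: unlazy once; no repeated pointless flushes. */
		leave_mm(smp_processor_id());
		return;
	}

	/* Non-lazy: perform the requested invalidation here. */
	local_flush_tlb();
}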

arch/x86/include/asm/tlbbatch.h

Lines changed: 0 additions & 2 deletions
@@ -3,14 +3,12 @@
 
 #include <linux/cpumask.h>
 
-#ifdef CONFIG_SMP
 struct arch_tlbflush_unmap_batch {
 	/*
 	 * Each bit set is a CPU that potentially has a TLB entry for one of
 	 * the PFNs being flushed..
 	 */
 	struct cpumask cpumask;
 };
-#endif
 
 #endif /* _ARCH_X86_TLBBATCH_H */

arch/x86/include/asm/tlbflush.h

Lines changed: 1 addition & 75 deletions
@@ -7,6 +7,7 @@
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 #include <asm/special_insns.h>
+#include <asm/smp.h>
 
 static inline void __invpcid(unsigned long pcid, unsigned long addr,
 			     unsigned long type)
@@ -65,10 +66,8 @@ static inline void invpcid_flush_all_nonglobals(void)
 #endif
 
 struct tlb_state {
-#ifdef CONFIG_SMP
 	struct mm_struct *active_mm;
 	int state;
-#endif
 
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
@@ -231,77 +230,6 @@ struct flush_tlb_info {
 	unsigned long end;
 };
 
-#ifndef CONFIG_SMP
-
-/* "_up" is for UniProcessor.
- *
- * This is a helper for other header functions. *Not* intended to be called
- * directly. All global TLB flushes need to either call this, or to bump the
- * vm statistics themselves.
- */
-static inline void __flush_tlb_up(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb();
-}
-
-static inline void flush_tlb_all(void)
-{
-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-	__flush_tlb_all();
-}
-
-static inline void local_flush_tlb(void)
-{
-	__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if (vma->vm_mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
-		unsigned long start, unsigned long end, unsigned long vmflag)
-{
-	if (mm == current->active_mm)
-		__flush_tlb_up();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-					   const struct flush_tlb_info *info)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-					  unsigned long end)
-{
-	flush_tlb_all();
-}
-
-#else /* SMP */
-
-#include <asm/smp.h>
-
 #define local_flush_tlb() __flush_tlb()
 
 #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
@@ -339,8 +267,6 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 
 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
-#endif /* SMP */
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, info) \
 	native_flush_tlb_others(mask, info)

arch/x86/mm/init.c

Lines changed: 0 additions & 2 deletions
@@ -811,10 +811,8 @@ void __init zone_sizes_init(void)
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
 	.active_mm = &init_mm,
 	.state = 0,
-#endif
 	.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
 };
 EXPORT_SYMBOL_GPL(cpu_tlbstate);

arch/x86/mm/tlb.c

Lines changed: 2 additions & 15 deletions
@@ -15,7 +15,7 @@
 #include <linux/debugfs.h>
 
 /*
- *	Smarter SMP flushing macros.
+ *	TLB flushing, formerly SMP-only
  *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
@@ -28,8 +28,6 @@
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */
 
-#ifdef CONFIG_SMP
-
 /*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
@@ -53,8 +51,6 @@ void leave_mm(int cpu)
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
-#endif /* CONFIG_SMP */
-
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
 {
@@ -85,10 +81,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
 	}
 
-#ifdef CONFIG_SMP
 	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 	this_cpu_write(cpu_tlbstate.active_mm, next);
-#endif
 
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 
@@ -146,9 +140,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_mm_ldt(next);
 #endif
-	}
-#ifdef CONFIG_SMP
-	else {
+	} else {
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
@@ -175,11 +167,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			load_mm_ldt(next);
 		}
 	}
-#endif
 }
 
-#ifdef CONFIG_SMP
-
 /*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
@@ -436,5 +425,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
 	return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
-
-#endif /* CONFIG_SMP */
