
Commit 42f3bdc

Peter Zijlstra authored and KAGA-KOKO committed
x86/events/intel/ds: Use the proper cache flush method for mapping ds buffers
Thomas reported the following warning:

 BUG: using smp_processor_id() in preemptible [00000000] code: ovsdb-server/4498
 caller is native_flush_tlb_single+0x57/0xc0
 native_flush_tlb_single+0x57/0xc0
 __set_pte_vaddr+0x2d/0x40
 set_pte_vaddr+0x2f/0x40
 cea_set_pte+0x30/0x40
 ds_update_cea.constprop.4+0x4d/0x70
 reserve_ds_buffers+0x159/0x410
 x86_reserve_hardware+0x150/0x160
 x86_pmu_event_init+0x3e/0x1f0
 perf_try_init_event+0x69/0x80
 perf_event_alloc+0x652/0x740
 SyS_perf_event_open+0x3f6/0xd60
 do_syscall_64+0x5c/0x190

set_pte_vaddr is used to map the ds buffers into the cpu entry area, but there are two problems with that:

 1) The resulting flush is not supposed to be called in preemptible context.

 2) The cpu entry area is supposed to be per CPU, but the debug store buffers are mapped for all CPUs, so these mappings need to be flushed globally.

Add the necessary preemption protection across the mapping code and flush TLBs globally.

Fixes: c1961a4 ("x86/events/intel/ds: Map debug buffers in cpu_entry_area")
Reported-by: Thomas Zeitlhofer <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Thomas Zeitlhofer <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
1 parent 1dddd25 commit 42f3bdc
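For context, here is a condensed sketch of ds_update_cea() as it looks with this patch applied, pieced together from the hunks below. It is an illustrative consolidation, not a verbatim copy of the file: the preempt_disable()/preempt_enable() pair keeps the PTE updates and the flush on one CPU, and flush_tlb_kernel_range() shoots down the stale cpu_entry_area translations on all CPUs rather than only the local one.

static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;	/* first cpu_entry_area address to flush */
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);			/* physical base of the DS buffer */

	/* Pin to this CPU so the PTE updates and the flush stay together. */
	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * The cpu_entry_area mapping is visible to all CPUs, so stale TLB
	 * entries must be invalidated globally, not just locally.
	 */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

ds_clear_cea() receives the same preempt_disable()/flush_tlb_kernel_range()/preempt_enable() bracketing when it tears the mapping down with PAGE_NONE, as the second hunk shows.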

File tree

1 file changed: +16 -0 lines changed
  • arch/x86/events/intel/ds.c


arch/x86/events/intel/ds.c

Lines changed: 16 additions & 0 deletions
@@ -5,6 +5,7 @@
 
 #include <asm/cpu_entry_area.h>
 #include <asm/perf_event.h>
+#include <asm/tlbflush.h>
 #include <asm/insn.h>
 
 #include "../perf_event.h"
@@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
 
 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
 {
+	unsigned long start = (unsigned long)cea;
 	phys_addr_t pa;
 	size_t msz = 0;
 
 	pa = virt_to_phys(addr);
+
+	preempt_disable();
 	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
 		cea_set_pte(cea, pa, prot);
+
+	/*
+	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
+	 * all TLB entries for it.
+	 */
+	flush_tlb_kernel_range(start, start + size);
+	preempt_enable();
 }
 
 static void ds_clear_cea(void *cea, size_t size)
 {
+	unsigned long start = (unsigned long)cea;
 	size_t msz = 0;
 
+	preempt_disable();
 	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
 		cea_set_pte(cea, 0, PAGE_NONE);
+
+	flush_tlb_kernel_range(start, start + size);
+	preempt_enable();
 }
 
 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
