@@ -5,6 +5,7 @@
 
 #include <asm/cpu_entry_area.h>
 #include <asm/perf_event.h>
+#include <asm/tlbflush.h>
 #include <asm/insn.h>
 
 #include "../perf_event.h"
@@ -283,20 +284,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
 
 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
 {
+	unsigned long start = (unsigned long)cea;
 	phys_addr_t pa;
 	size_t msz = 0;
 
 	pa = virt_to_phys(addr);
+
+	preempt_disable();
 	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
 		cea_set_pte(cea, pa, prot);
+
+	/*
+	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
+	 * all TLB entries for it.
+	 */
+	flush_tlb_kernel_range(start, start + size);
+	preempt_enable();
 }
 
 static void ds_clear_cea(void *cea, size_t size)
 {
+	unsigned long start = (unsigned long)cea;
 	size_t msz = 0;
 
+	preempt_disable();
 	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
 		cea_set_pte(cea, 0, PAGE_NONE);
+
+	flush_tlb_kernel_range(start, start + size);
+	preempt_enable();
 }
 
 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
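
For readability, this is what ds_update_cea() looks like with the hunk above applied; it is reconstructed directly from the diff, with nothing added beyond it. ds_clear_cea() follows the same pattern: disable preemption, rewrite the PTEs, flush the kernel TLB range, re-enable preemption.

static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
	 * all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

Since the cpu_entry_area mapping is shared by all CPUs, flush_tlb_kernel_range() is used here to invalidate any stale translations for the updated range on every CPU, not just the one performing the remap.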