Skip to content

Commit 763b218

Browse files
Joel Fernandes authored and torvalds committed
mm: add preempt points into __purge_vmap_area_lazy()
Use cond_resched_lock to avoid holding the vmap_area_lock for a
potentially long time and thus creating bad latencies for various
workloads.

[hch: split from a larger patch by Joel, wrote the crappy changelog]

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Joel Fernandes <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Tested-by: Jisheng Zhang <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Chris Wilson <[email protected]>
Cc: John Dias <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent f9e0997 commit 763b218

File tree

1 file changed

+9
-5
lines changed

1 file changed

+9
-5
lines changed

mm/vmalloc.c

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -628,7 +628,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
628628
struct llist_node *valist;
629629
struct vmap_area *va;
630630
struct vmap_area *n_va;
631-
int nr = 0;
631+
bool do_free = false;
632632

633633
lockdep_assert_held(&vmap_purge_lock);
634634

@@ -638,18 +638,22 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
638638
start = va->va_start;
639639
if (va->va_end > end)
640640
end = va->va_end;
641-
nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
641+
do_free = true;
642642
}
643643

644-
if (!nr)
644+
if (!do_free)
645645
return false;
646646

647-
atomic_sub(nr, &vmap_lazy_nr);
648647
flush_tlb_kernel_range(start, end);
649648

650649
spin_lock(&vmap_area_lock);
651-
llist_for_each_entry_safe(va, n_va, valist, purge_list)
650+
llist_for_each_entry_safe(va, n_va, valist, purge_list) {
651+
int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
652+
652653
__free_vmap_area(va);
654+
atomic_sub(nr, &vmap_lazy_nr);
655+
cond_resched_lock(&vmap_area_lock);
656+
}
653657
spin_unlock(&vmap_area_lock);
654658
return true;
655659
}

0 commit comments

Comments (0)