
Commit 26bcd64

Naoya Horiguchi authored and torvalds committed
memcg: cleanup preparation for page table walk
pagewalk.c can handle vma in itself, so we don't have to pass vma via walk->private. Both mem_cgroup_count_precharge() and mem_cgroup_move_charge() currently run their own for-each-vma loops, but that iteration is now done in pagewalk.c, so let's clean them up.

Signed-off-by: Naoya Horiguchi <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Cyrill Gorcunov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent d85f4d6 commit 26bcd64
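The cleanup relies on the page walker iterating the mm's vmas itself and exposing the current one as walk->vma, so callers no longer loop over vmas and stash the current one in walk->private. Below is a minimal userspace sketch of that pattern, for illustration only: it is not kernel code, and the names (struct mm_walk_like, walk_range, count_entry) are invented here rather than taken from the kernel API.

/* Userspace analogue only: models the walker owning the per-vma loop. */
#include <stdio.h>

struct vma {
        unsigned long start, end;
        struct vma *next;
};

struct mm_walk_like {
        int (*pmd_entry)(unsigned long addr, unsigned long end,
                         struct mm_walk_like *walk);
        struct vma *vma;        /* set by the walker for each vma it visits */
        void *private;          /* no longer needed for smuggling the vma */
};

/* The walker loops over the vma list itself, as pagewalk.c does here. */
static int walk_range(struct vma *mmap, unsigned long start,
                      unsigned long end, struct mm_walk_like *walk)
{
        struct vma *v;
        int ret = 0;

        for (v = mmap; v && !ret; v = v->next) {
                if (v->end <= start || v->start >= end)
                        continue;
                walk->vma = v;  /* callback reads walk->vma directly */
                ret = walk->pmd_entry(v->start, v->end, walk);
        }
        return ret;     /* a non-zero return from the callback aborts the walk */
}

static int count_entry(unsigned long addr, unsigned long end,
                       struct mm_walk_like *walk)
{
        printf("visited [%#lx, %#lx) in vma starting at %#lx\n",
               addr, end, walk->vma->start);
        return 0;
}

int main(void)
{
        struct vma high = { 0x2000, 0x3000, NULL };
        struct vma low  = { 0x0000, 0x1000, &high };
        struct mm_walk_like walk = { .pmd_entry = count_entry };

        /* One call over the whole range replaces the caller's per-vma loop. */
        return walk_range(&low, 0, ~0UL, &walk);
}

In the real diff below, the same shape appears as mem_cgroup_count_precharge() and mem_cgroup_move_charge() dropping their for (vma = mm->mmap; ...) loops in favor of a single walk_page_range(0, ~0UL, &walk) call, while the pte-range callbacks switch from walk->private to walk->vma.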

File tree

1 file changed (+16, -33 lines)


mm/memcontrol.c

Lines changed: 16 additions & 33 deletions
@@ -4839,7 +4839,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
 {
-        struct vm_area_struct *vma = walk->private;
+        struct vm_area_struct *vma = walk->vma;
         pte_t *pte;
         spinlock_t *ptl;
 
@@ -4865,20 +4865,13 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
 {
         unsigned long precharge;
-        struct vm_area_struct *vma;
 
+        struct mm_walk mem_cgroup_count_precharge_walk = {
+                .pmd_entry = mem_cgroup_count_precharge_pte_range,
+                .mm = mm,
+        };
         down_read(&mm->mmap_sem);
-        for (vma = mm->mmap; vma; vma = vma->vm_next) {
-                struct mm_walk mem_cgroup_count_precharge_walk = {
-                        .pmd_entry = mem_cgroup_count_precharge_pte_range,
-                        .mm = mm,
-                        .private = vma,
-                };
-                if (is_vm_hugetlb_page(vma))
-                        continue;
-                walk_page_range(vma->vm_start, vma->vm_end,
-                                        &mem_cgroup_count_precharge_walk);
-        }
+        walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
         up_read(&mm->mmap_sem);
 
         precharge = mc.precharge;
@@ -5011,7 +5004,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
                                 struct mm_walk *walk)
 {
         int ret = 0;
-        struct vm_area_struct *vma = walk->private;
+        struct vm_area_struct *vma = walk->vma;
         pte_t *pte;
         spinlock_t *ptl;
         enum mc_target_type target_type;
@@ -5107,7 +5100,10 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 
 static void mem_cgroup_move_charge(struct mm_struct *mm)
 {
-        struct vm_area_struct *vma;
+        struct mm_walk mem_cgroup_move_charge_walk = {
+                .pmd_entry = mem_cgroup_move_charge_pte_range,
+                .mm = mm,
+        };
 
         lru_add_drain_all();
         /*
@@ -5130,24 +5126,11 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
                 cond_resched();
                 goto retry;
         }
-        for (vma = mm->mmap; vma; vma = vma->vm_next) {
-                int ret;
-                struct mm_walk mem_cgroup_move_charge_walk = {
-                        .pmd_entry = mem_cgroup_move_charge_pte_range,
-                        .mm = mm,
-                        .private = vma,
-                };
-                if (is_vm_hugetlb_page(vma))
-                        continue;
-                ret = walk_page_range(vma->vm_start, vma->vm_end,
-                                        &mem_cgroup_move_charge_walk);
-                if (ret)
-                        /*
-                         * means we have consumed all precharges and failed in
-                         * doing additional charge. Just abandon here.
-                         */
-                        break;
-        }
+        /*
+         * When we have consumed all precharges and failed in doing
+         * additional charge, the page walk just aborts.
+         */
+        walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
         up_read(&mm->mmap_sem);
         atomic_dec(&mc.from->moving_account);
 }
